drivers/net/ethernet/broadcom/bnxt/bnxt.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

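/* Doorbell helpers: P5 (57500 series) chips use the 64-bit NQ/CQ doorbell
 * format, older chips use the legacy 32-bit completion ring doorbell.
 */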
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

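/* TX length hint table, indexed by packet length in 512-byte units; the
 * value is OR'ed into the TX BD flags to hint the packet size to hardware.
 */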
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

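/* Return the CFA action (hardware port id) from the skb's metadata dst,
 * set when transmitting through a port-mux device such as a VF representor;
 * returns 0 if no such metadata is attached.
 */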
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_db_write(bp, &txr->tx_db, prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

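/* Reclaim nr_pkts completed TX packets: unmap their DMA buffers, free the
 * skbs, advance the consumer index and wake the TX queue if it was stopped
 * and enough descriptors are now available.
 */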
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

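/* Find the next free slot in the aggregation buffer bitmap, wrapping back
 * to the start of the bitmap when the end is reached.
 */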
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

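/* Return agg_bufs aggregation buffers of a dropped or aborted packet to the
 * aggregation ring so they can be reused.
 */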
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
        page_pool_release_page(rxr->page_pool, page);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

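/* Copy-break receive path: copy a small packet into a freshly allocated skb
 * so the original DMA buffer can stay on the RX ring.
 */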
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                flush_workqueue(bnxt_pf_wq);
        else
                cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct bnxt_tpa_info *tpa_info;
        u16 cons, prod, agg_id;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_START_AGG_ID_P5(tpa_start);
                agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
        } else {
                agg_id = TPA_START_AGG_ID(tpa_start);
        }
        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons ||
                     TPA_START_ERROR(tpa_start))) {
                netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
                            cons, rxr->rx_next_cons,
                            TPA_START_ERROR_CODE(tpa_start1));
                bnxt_sched_reset(bp, rxr);
                return;
        }
        /* Store cfa_code in tpa_info to use in tpa_end
         * completion processing.
         */
        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
        tpa_info->agg_count = 0;

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
        struct udphdr *uh = NULL;

        if (ip_proto == htons(ETH_P_IP)) {
                struct iphdr *iph = (struct iphdr *)skb->data;

                if (iph->protocol == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                if (iph->nexthdr == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        }
        if (uh) {
                if (uh->check)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                else
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
        }
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
1342                  * correct protocol ID, it must be a loopback packet where
1343                  * the offsets are off by 4.
1344                  */
1345                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1346                         loopback = true;
1347         }
1348         if (loopback) {
1349                 /* internal loopback packet, subtract all offsets by 4 */
1350                 inner_ip_off -= 4;
1351                 inner_mac_off -= 4;
1352                 outer_ip_off -= 4;
1353         }
1354
1355         nw_off = inner_ip_off - ETH_HLEN;
1356         skb_set_network_header(skb, nw_off);
1357         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1358                 struct ipv6hdr *iph = ipv6_hdr(skb);
1359
1360                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1361                 len = skb->len - skb_transport_offset(skb);
1362                 th = tcp_hdr(skb);
1363                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1364         } else {
1365                 struct iphdr *iph = ip_hdr(skb);
1366
1367                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1368                 len = skb->len - skb_transport_offset(skb);
1369                 th = tcp_hdr(skb);
1370                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1371         }
1372
1373         if (inner_mac_off) { /* tunnel */
1374                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1375                                             ETH_HLEN - 2));
1376
1377                 bnxt_gro_tunnel(skb, proto);
1378         }
1379 #endif
1380         return skb;
1381 }
1382
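/* GRO fixup for the 5750X (P5) chips: only the network and transport
 * header offsets need to be set from tpa_info->hdr_info; tunnel GSO
 * types are still flagged for encapsulated packets.
 */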
1383 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1384                                            int payload_off, int tcp_ts,
1385                                            struct sk_buff *skb)
1386 {
1387 #ifdef CONFIG_INET
1388         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1389         u32 hdr_info = tpa_info->hdr_info;
1390         int iphdr_len, nw_off;
1391
1392         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1393         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1394         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1395
1396         nw_off = inner_ip_off - ETH_HLEN;
1397         skb_set_network_header(skb, nw_off);
1398         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1399                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1400         skb_set_transport_header(skb, nw_off + iphdr_len);
1401
1402         if (inner_mac_off) { /* tunnel */
1403                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1404                                             ETH_HLEN - 2));
1405
1406                 bnxt_gro_tunnel(skb, proto);
1407         }
1408 #endif
1409         return skb;
1410 }
1411
1412 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1413 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1414
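/* GRO fixup for the 5730X-style chips: derive the header offsets from
 * the TCP payload offset in the TPA end completion and the fixed
 * IPv4/IPv6 plus TCP header sizes (plus the TCP timestamp option when
 * present), then recompute the TCP pseudo-header checksum.
 */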
1415 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1416                                            int payload_off, int tcp_ts,
1417                                            struct sk_buff *skb)
1418 {
1419 #ifdef CONFIG_INET
1420         struct tcphdr *th;
1421         int len, nw_off, tcp_opt_len = 0;
1422
1423         if (tcp_ts)
1424                 tcp_opt_len = 12;
1425
1426         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1427                 struct iphdr *iph;
1428
1429                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1430                          ETH_HLEN;
1431                 skb_set_network_header(skb, nw_off);
1432                 iph = ip_hdr(skb);
1433                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1434                 len = skb->len - skb_transport_offset(skb);
1435                 th = tcp_hdr(skb);
1436                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1437         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1438                 struct ipv6hdr *iph;
1439
1440                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1441                          ETH_HLEN;
1442                 skb_set_network_header(skb, nw_off);
1443                 iph = ipv6_hdr(skb);
1444                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1445                 len = skb->len - skb_transport_offset(skb);
1446                 th = tcp_hdr(skb);
1447                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1448         } else {
1449                 dev_kfree_skb_any(skb);
1450                 return NULL;
1451         }
1452
1453         if (nw_off) /* tunnel */
1454                 bnxt_gro_tunnel(skb, skb->protocol);
1455 #endif
1456         return skb;
1457 }
1458
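/* Prepare an aggregated TPA skb for GRO: fill in the segment count,
 * gso_size and gso_type from the TPA end completion, let the
 * chip-specific gro_func fix up the headers, then complete GRO.
 */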
1459 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1460                                            struct bnxt_tpa_info *tpa_info,
1461                                            struct rx_tpa_end_cmp *tpa_end,
1462                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1463                                            struct sk_buff *skb)
1464 {
1465 #ifdef CONFIG_INET
1466         int payload_off;
1467         u16 segs;
1468
1469         segs = TPA_END_TPA_SEGS(tpa_end);
1470         if (segs == 1)
1471                 return skb;
1472
1473         NAPI_GRO_CB(skb)->count = segs;
1474         skb_shinfo(skb)->gso_size =
1475                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1476         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1477         if (bp->flags & BNXT_FLAG_CHIP_P5)
1478                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1479         else
1480                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1481         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1482         if (likely(skb))
1483                 tcp_gro_complete(skb);
1484 #endif
1485         return skb;
1486 }
1487
1488 /* Given the cfa_code of a received packet, determine which
1489  * netdev (vf-rep or PF) the packet is destined to.
1490  */
1491 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1492 {
1493         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1494
1495         /* if the vf-rep dev is NULL, the packet must belong to the PF */
1496         return dev ? dev : bp->dev;
1497 }
1498
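/* Handle a TPA end completion: look up the TPA buffer, copy or replace
 * the data buffer, attach any aggregation pages, and return the fully
 * assembled skb.  Returns NULL if the packet is aborted and
 * ERR_PTR(-EBUSY) if the completion ring does not yet have all the
 * aggregation buffers.
 */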
1499 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1500                                            struct bnxt_cp_ring_info *cpr,
1501                                            u32 *raw_cons,
1502                                            struct rx_tpa_end_cmp *tpa_end,
1503                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1504                                            u8 *event)
1505 {
1506         struct bnxt_napi *bnapi = cpr->bnapi;
1507         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1508         u8 *data_ptr, agg_bufs;
1509         unsigned int len;
1510         struct bnxt_tpa_info *tpa_info;
1511         dma_addr_t mapping;
1512         struct sk_buff *skb;
1513         u16 idx = 0, agg_id;
1514         void *data;
1515         bool gro;
1516
1517         if (unlikely(bnapi->in_reset)) {
1518                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1519
1520                 if (rc < 0)
1521                         return ERR_PTR(-EBUSY);
1522                 return NULL;
1523         }
1524
1525         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1526                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1527                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1528                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1529                 tpa_info = &rxr->rx_tpa[agg_id];
1530                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1531                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1532                                     agg_bufs, tpa_info->agg_count);
1533                         agg_bufs = tpa_info->agg_count;
1534                 }
1535                 tpa_info->agg_count = 0;
1536                 *event |= BNXT_AGG_EVENT;
1537                 bnxt_free_agg_idx(rxr, agg_id);
1538                 idx = agg_id;
1539                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1540         } else {
1541                 agg_id = TPA_END_AGG_ID(tpa_end);
1542                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1543                 tpa_info = &rxr->rx_tpa[agg_id];
1544                 idx = RING_CMP(*raw_cons);
1545                 if (agg_bufs) {
1546                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1547                                 return ERR_PTR(-EBUSY);
1548
1549                         *event |= BNXT_AGG_EVENT;
1550                         idx = NEXT_CMP(idx);
1551                 }
1552                 gro = !!TPA_END_GRO(tpa_end);
1553         }
1554         data = tpa_info->data;
1555         data_ptr = tpa_info->data_ptr;
1556         prefetch(data_ptr);
1557         len = tpa_info->len;
1558         mapping = tpa_info->mapping;
1559
1560         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1561                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1562                 if (agg_bufs > MAX_SKB_FRAGS)
1563                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1564                                     agg_bufs, (int)MAX_SKB_FRAGS);
1565                 return NULL;
1566         }
1567
1568         if (len <= bp->rx_copy_thresh) {
1569                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1570                 if (!skb) {
1571                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1572                         return NULL;
1573                 }
1574         } else {
1575                 u8 *new_data;
1576                 dma_addr_t new_mapping;
1577
1578                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1579                 if (!new_data) {
1580                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1581                         return NULL;
1582                 }
1583
1584                 tpa_info->data = new_data;
1585                 tpa_info->data_ptr = new_data + bp->rx_offset;
1586                 tpa_info->mapping = new_mapping;
1587
1588                 skb = build_skb(data, 0);
1589                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1590                                        bp->rx_buf_use_size, bp->rx_dir,
1591                                        DMA_ATTR_WEAK_ORDERING);
1592
1593                 if (!skb) {
1594                         kfree(data);
1595                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1596                         return NULL;
1597                 }
1598                 skb_reserve(skb, bp->rx_offset);
1599                 skb_put(skb, len);
1600         }
1601
1602         if (agg_bufs) {
1603                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1604                 if (!skb) {
1605                         /* Page reuse already handled by bnxt_rx_pages(). */
1606                         return NULL;
1607                 }
1608         }
1609
1610         skb->protocol =
1611                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1612
1613         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1614                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1615
1616         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1617             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1618                 u16 vlan_proto = tpa_info->metadata >>
1619                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1620                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1621
1622                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1623         }
1624
1625         skb_checksum_none_assert(skb);
1626         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1627                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1628                 skb->csum_level =
1629                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1630         }
1631
1632         if (gro)
1633                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1634
1635         return skb;
1636 }
1637
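/* Queue a TPA aggregation completion on the rx ring; the queued buffers
 * are consumed when the TPA end completion for the same agg_id arrives.
 */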
1638 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1639                          struct rx_agg_cmp *rx_agg)
1640 {
1641         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1642         struct bnxt_tpa_info *tpa_info;
1643
1644         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1645         tpa_info = &rxr->rx_tpa[agg_id];
1646         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1647         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1648 }
1649
1650 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1651                              struct sk_buff *skb)
1652 {
1653         if (skb->dev != bp->dev) {
1654                 /* this packet belongs to a vf-rep */
1655                 bnxt_vf_rep_rx(bp, skb);
1656                 return;
1657         }
1658         skb_record_rx_queue(skb, bnapi->index);
1659         napi_gro_receive(&bnapi->napi, skb);
1660 }
1661
1662 /* returns the following:
1663  * 1       - 1 packet successfully received
1664  * 0       - successful TPA_START, packet not completed yet
1665  * -EBUSY  - completion ring does not have all the agg buffers yet
1666  * -ENOMEM - packet aborted due to out of memory
1667  * -EIO    - packet aborted due to hw error indicated in BD
1668  */
1669 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1670                        u32 *raw_cons, u8 *event)
1671 {
1672         struct bnxt_napi *bnapi = cpr->bnapi;
1673         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1674         struct net_device *dev = bp->dev;
1675         struct rx_cmp *rxcmp;
1676         struct rx_cmp_ext *rxcmp1;
1677         u32 tmp_raw_cons = *raw_cons;
1678         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1679         struct bnxt_sw_rx_bd *rx_buf;
1680         unsigned int len;
1681         u8 *data_ptr, agg_bufs, cmp_type;
1682         dma_addr_t dma_addr;
1683         struct sk_buff *skb;
1684         void *data;
1685         int rc = 0;
1686         u32 misc;
1687
1688         rxcmp = (struct rx_cmp *)
1689                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1690
1691         cmp_type = RX_CMP_TYPE(rxcmp);
1692
1693         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1694                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1695                 goto next_rx_no_prod_no_len;
1696         }
1697
1698         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1699         cp_cons = RING_CMP(tmp_raw_cons);
1700         rxcmp1 = (struct rx_cmp_ext *)
1701                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1702
1703         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1704                 return -EBUSY;
1705
1706         prod = rxr->rx_prod;
1707
1708         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1709                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1710                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1711
1712                 *event |= BNXT_RX_EVENT;
1713                 goto next_rx_no_prod_no_len;
1714
1715         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1716                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1717                                    (struct rx_tpa_end_cmp *)rxcmp,
1718                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1719
1720                 if (IS_ERR(skb))
1721                         return -EBUSY;
1722
1723                 rc = -ENOMEM;
1724                 if (likely(skb)) {
1725                         bnxt_deliver_skb(bp, bnapi, skb);
1726                         rc = 1;
1727                 }
1728                 *event |= BNXT_RX_EVENT;
1729                 goto next_rx_no_prod_no_len;
1730         }
1731
1732         cons = rxcmp->rx_cmp_opaque;
1733         if (unlikely(cons != rxr->rx_next_cons)) {
1734                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1735
1736                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1737                             cons, rxr->rx_next_cons);
1738                 bnxt_sched_reset(bp, rxr);
1739                 return rc1;
1740         }
1741         rx_buf = &rxr->rx_buf_ring[cons];
1742         data = rx_buf->data;
1743         data_ptr = rx_buf->data_ptr;
1744         prefetch(data_ptr);
1745
1746         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1747         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1748
1749         if (agg_bufs) {
1750                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1751                         return -EBUSY;
1752
1753                 cp_cons = NEXT_CMP(cp_cons);
1754                 *event |= BNXT_AGG_EVENT;
1755         }
1756         *event |= BNXT_RX_EVENT;
1757
1758         rx_buf->data = NULL;
1759         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1760                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1761
1762                 bnxt_reuse_rx_data(rxr, cons, data);
1763                 if (agg_bufs)
1764                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1765                                                false);
1766
1767                 rc = -EIO;
1768                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1769                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1770                         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1771                                 netdev_warn(bp->dev, "RX buffer error %x\n",
1772                                             rx_err);
1773                                 bnxt_sched_reset(bp, rxr);
1774                         }
1775                 }
1776                 goto next_rx_no_len;
1777         }
1778
1779         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1780         dma_addr = rx_buf->mapping;
1781
1782         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1783                 rc = 1;
1784                 goto next_rx;
1785         }
1786
1787         if (len <= bp->rx_copy_thresh) {
1788                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1789                 bnxt_reuse_rx_data(rxr, cons, data);
1790                 if (!skb) {
1791                         if (agg_bufs)
1792                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1793                                                        agg_bufs, false);
1794                         rc = -ENOMEM;
1795                         goto next_rx;
1796                 }
1797         } else {
1798                 u32 payload;
1799
1800                 if (rx_buf->data_ptr == data_ptr)
1801                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1802                 else
1803                         payload = 0;
1804                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1805                                       payload | len);
1806                 if (!skb) {
1807                         rc = -ENOMEM;
1808                         goto next_rx;
1809                 }
1810         }
1811
1812         if (agg_bufs) {
1813                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1814                 if (!skb) {
1815                         rc = -ENOMEM;
1816                         goto next_rx;
1817                 }
1818         }
1819
1820         if (RX_CMP_HASH_VALID(rxcmp)) {
1821                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1822                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1823
1824                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1825                 if (hash_type != 1 && hash_type != 3)
1826                         type = PKT_HASH_TYPE_L3;
1827                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1828         }
1829
1830         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1831         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1832
1833         if ((rxcmp1->rx_cmp_flags2 &
1834              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1835             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1836                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1837                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1838                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1839
1840                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1841         }
1842
1843         skb_checksum_none_assert(skb);
1844         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1845                 if (dev->features & NETIF_F_RXCSUM) {
1846                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1847                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1848                 }
1849         } else {
1850                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1851                         if (dev->features & NETIF_F_RXCSUM)
1852                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1853                 }
1854         }
1855
1856         bnxt_deliver_skb(bp, bnapi, skb);
1857         rc = 1;
1858
1859 next_rx:
1860         cpr->rx_packets += 1;
1861         cpr->rx_bytes += len;
1862
1863 next_rx_no_len:
1864         rxr->rx_prod = NEXT_RX(prod);
1865         rxr->rx_next_cons = NEXT_RX(cons);
1866
1867 next_rx_no_prod_no_len:
1868         *raw_cons = tmp_raw_cons;
1869
1870         return rc;
1871 }
1872
1873 /* In netpoll mode, if we are using a combined completion ring, we need to
1874  * discard the rx packets and recycle the buffers.
1875  */
1876 static int bnxt_force_rx_discard(struct bnxt *bp,
1877                                  struct bnxt_cp_ring_info *cpr,
1878                                  u32 *raw_cons, u8 *event)
1879 {
1880         u32 tmp_raw_cons = *raw_cons;
1881         struct rx_cmp_ext *rxcmp1;
1882         struct rx_cmp *rxcmp;
1883         u16 cp_cons;
1884         u8 cmp_type;
1885
1886         cp_cons = RING_CMP(tmp_raw_cons);
1887         rxcmp = (struct rx_cmp *)
1888                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1889
1890         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1891         cp_cons = RING_CMP(tmp_raw_cons);
1892         rxcmp1 = (struct rx_cmp_ext *)
1893                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1894
1895         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1896                 return -EBUSY;
1897
1898         cmp_type = RX_CMP_TYPE(rxcmp);
1899         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1900                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1901                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1902         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1903                 struct rx_tpa_end_cmp_ext *tpa_end1;
1904
1905                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1906                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1907                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1908         }
1909         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1910 }
1911
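/* Read a firmware health register.  The register descriptor encodes the
 * access method (PCI config space, GRC window, BAR0 or BAR1) and the
 * offset.
 */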
1912 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1913 {
1914         struct bnxt_fw_health *fw_health = bp->fw_health;
1915         u32 reg = fw_health->regs[reg_idx];
1916         u32 reg_type, reg_off, val = 0;
1917
1918         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1919         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1920         switch (reg_type) {
1921         case BNXT_FW_HEALTH_REG_TYPE_CFG:
1922                 pci_read_config_dword(bp->pdev, reg_off, &val);
1923                 break;
1924         case BNXT_FW_HEALTH_REG_TYPE_GRC:
1925                 reg_off = fw_health->mapped_regs[reg_idx];
1926                 /* fall through */
1927         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1928                 val = readl(bp->bar0 + reg_off);
1929                 break;
1930         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1931                 val = readl(bp->bar1 + reg_off);
1932                 break;
1933         }
1934         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1935                 val &= fw_health->fw_reset_inprog_reg_mask;
1936         return val;
1937 }
1938
1939 #define BNXT_GET_EVENT_PORT(data)       \
1940         ((data) &                       \
1941          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1942
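/* Process a firmware async event completion: record the event in
 * bp->sp_event and kick the slow path workqueue, or update the firmware
 * reset/recovery state for reset notify and error recovery events.
 */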
1943 static int bnxt_async_event_process(struct bnxt *bp,
1944                                     struct hwrm_async_event_cmpl *cmpl)
1945 {
1946         u16 event_id = le16_to_cpu(cmpl->event_id);
1947
1948         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1949         switch (event_id) {
1950         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1951                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1952                 struct bnxt_link_info *link_info = &bp->link_info;
1953
1954                 if (BNXT_VF(bp))
1955                         goto async_event_process_exit;
1956
1957                 /* print unsupported speed warning in forced speed mode only */
1958                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1959                     (data1 & 0x20000)) {
1960                         u16 fw_speed = link_info->force_link_speed;
1961                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1962
1963                         if (speed != SPEED_UNKNOWN)
1964                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1965                                             speed);
1966                 }
1967                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1968         }
1969         /* fall through */
1970         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1971         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
1972                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
1973                 /* fall through */
1974         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1975                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1976                 break;
1977         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1978                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1979                 break;
1980         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1981                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1982                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1983
1984                 if (BNXT_VF(bp))
1985                         break;
1986
1987                 if (bp->pf.port_id != port_id)
1988                         break;
1989
1990                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1991                 break;
1992         }
1993         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1994                 if (BNXT_PF(bp))
1995                         goto async_event_process_exit;
1996                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1997                 break;
1998         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
1999                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2000
2001                 if (!bp->fw_health)
2002                         goto async_event_process_exit;
2003
2004                 bp->fw_reset_timestamp = jiffies;
2005                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2006                 if (!bp->fw_reset_min_dsecs)
2007                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2008                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2009                 if (!bp->fw_reset_max_dsecs)
2010                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2011                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2012                         netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2013                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2014                 } else {
2015                         netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2016                                     bp->fw_reset_max_dsecs * 100);
2017                 }
2018                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2019                 break;
2020         }
2021         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2022                 struct bnxt_fw_health *fw_health = bp->fw_health;
2023                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2024
2025                 if (!fw_health)
2026                         goto async_event_process_exit;
2027
2028                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2029                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2030                 if (!fw_health->enabled)
2031                         break;
2032
2033                 if (netif_msg_drv(bp))
2034                         netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2035                                     fw_health->enabled, fw_health->master,
2036                                     bnxt_fw_health_readl(bp,
2037                                                          BNXT_FW_RESET_CNT_REG),
2038                                     bnxt_fw_health_readl(bp,
2039                                                          BNXT_FW_HEALTH_REG));
2040                 fw_health->tmr_multiplier =
2041                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2042                                      bp->current_interval * 10);
2043                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2044                 fw_health->last_fw_heartbeat =
2045                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2046                 fw_health->last_fw_reset_cnt =
2047                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2048                 goto async_event_process_exit;
2049         }
2050         default:
2051                 goto async_event_process_exit;
2052         }
2053         bnxt_queue_sp_work(bp);
2054 async_event_process_exit:
2055         bnxt_ulp_async_events(bp, cmpl);
2056         return 0;
2057 }
2058
2059 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2060 {
2061         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2062         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2063         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2064                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2065
2066         switch (cmpl_type) {
2067         case CMPL_BASE_TYPE_HWRM_DONE:
2068                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2069                 if (seq_id == bp->hwrm_intr_seq_id)
2070                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2071                 else
2072                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2073                 break;
2074
2075         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2076                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2077
2078                 if ((vf_id < bp->pf.first_vf_id) ||
2079                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2080                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2081                                    vf_id);
2082                         return -EINVAL;
2083                 }
2084
2085                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2086                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2087                 bnxt_queue_sp_work(bp);
2088                 break;
2089
2090         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2091                 bnxt_async_event_process(bp,
2092                                          (struct hwrm_async_event_cmpl *)txcmp);
2093
2094         default:
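                /* fall through */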
2095                 break;
2096         }
2097
2098         return 0;
2099 }
2100
2101 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2102 {
2103         struct bnxt_napi *bnapi = dev_instance;
2104         struct bnxt *bp = bnapi->bp;
2105         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2106         u32 cons = RING_CMP(cpr->cp_raw_cons);
2107
2108         cpr->event_ctr++;
2109         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2110         napi_schedule(&bnapi->napi);
2111         return IRQ_HANDLED;
2112 }
2113
2114 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2115 {
2116         u32 raw_cons = cpr->cp_raw_cons;
2117         u16 cons = RING_CMP(raw_cons);
2118         struct tx_cmp *txcmp;
2119
2120         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2121
2122         return TX_CMP_VALID(txcmp, raw_cons);
2123 }
2124
2125 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2126 {
2127         struct bnxt_napi *bnapi = dev_instance;
2128         struct bnxt *bp = bnapi->bp;
2129         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2130         u32 cons = RING_CMP(cpr->cp_raw_cons);
2131         u32 int_status;
2132
2133         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2134
2135         if (!bnxt_has_work(bp, cpr)) {
2136                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2137                 /* return if erroneous interrupt */
2138                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2139                         return IRQ_NONE;
2140         }
2141
2142         /* disable ring IRQ */
2143         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2144
2145         /* Return here if interrupt is shared and is disabled. */
2146         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2147                 return IRQ_HANDLED;
2148
2149         napi_schedule(&bnapi->napi);
2150         return IRQ_HANDLED;
2151 }
2152
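/* Process completion ring entries up to the NAPI budget: count TX
 * completions, receive or discard RX packets, and dispatch HWRM and
 * async event completions.  TX buffers are freed and RX doorbells are
 * written later in __bnxt_poll_work_done().
 */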
2153 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2154                             int budget)
2155 {
2156         struct bnxt_napi *bnapi = cpr->bnapi;
2157         u32 raw_cons = cpr->cp_raw_cons;
2158         u32 cons;
2159         int tx_pkts = 0;
2160         int rx_pkts = 0;
2161         u8 event = 0;
2162         struct tx_cmp *txcmp;
2163
2164         cpr->has_more_work = 0;
2165         cpr->had_work_done = 1;
2166         while (1) {
2167                 int rc;
2168
2169                 cons = RING_CMP(raw_cons);
2170                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2171
2172                 if (!TX_CMP_VALID(txcmp, raw_cons))
2173                         break;
2174
2175                 /* The valid test of the entry must be done before
2176                  * reading any further.
2177                  */
2178                 dma_rmb();
2179                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2180                         tx_pkts++;
2181                         /* return full budget so NAPI will complete. */
2182                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2183                                 rx_pkts = budget;
2184                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2185                                 if (budget)
2186                                         cpr->has_more_work = 1;
2187                                 break;
2188                         }
2189                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2190                         if (likely(budget))
2191                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2192                         else
2193                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2194                                                            &event);
2195                         if (likely(rc >= 0))
2196                                 rx_pkts += rc;
2197                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2198                          * the NAPI budget.  Otherwise, we may potentially loop
2199                          * here forever if we consistently cannot allocate
2200                          * buffers.
2201                          */
2202                         else if (rc == -ENOMEM && budget)
2203                                 rx_pkts++;
2204                         else if (rc == -EBUSY)  /* partial completion */
2205                                 break;
2206                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2207                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2208                                     (TX_CMP_TYPE(txcmp) ==
2209                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2210                                     (TX_CMP_TYPE(txcmp) ==
2211                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2212                         bnxt_hwrm_handler(bp, txcmp);
2213                 }
2214                 raw_cons = NEXT_RAW_CMP(raw_cons);
2215
2216                 if (rx_pkts && rx_pkts == budget) {
2217                         cpr->has_more_work = 1;
2218                         break;
2219                 }
2220         }
2221
2222         if (event & BNXT_REDIRECT_EVENT)
2223                 xdp_do_flush_map();
2224
2225         if (event & BNXT_TX_EVENT) {
2226                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2227                 u16 prod = txr->tx_prod;
2228
2229                 /* Sync BD data before updating doorbell */
2230                 wmb();
2231
2232                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2233         }
2234
2235         cpr->cp_raw_cons = raw_cons;
2236         bnapi->tx_pkts += tx_pkts;
2237         bnapi->events |= event;
2238         return rx_pkts;
2239 }
2240
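/* Finish a poll pass: free the TX buffers counted by __bnxt_poll_work()
 * and ring the RX and aggregation doorbells for any buffers produced.
 */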
2241 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2242 {
2243         if (bnapi->tx_pkts) {
2244                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2245                 bnapi->tx_pkts = 0;
2246         }
2247
2248         if (bnapi->events & BNXT_RX_EVENT) {
2249                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2250
2251                 if (bnapi->events & BNXT_AGG_EVENT)
2252                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2253                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2254         }
2255         bnapi->events = 0;
2256 }
2257
2258 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2259                           int budget)
2260 {
2261         struct bnxt_napi *bnapi = cpr->bnapi;
2262         int rx_pkts;
2263
2264         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2265
2266         /* ACK completion ring before freeing tx ring and producing new
2267          * buffers in rx/agg rings to prevent overflowing the completion
2268          * ring.
2269          */
2270         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2271
2272         __bnxt_poll_work_done(bp, bnapi);
2273         return rx_pkts;
2274 }
2275
2276 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2277 {
2278         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2279         struct bnxt *bp = bnapi->bp;
2280         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2281         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2282         struct tx_cmp *txcmp;
2283         struct rx_cmp_ext *rxcmp1;
2284         u32 cp_cons, tmp_raw_cons;
2285         u32 raw_cons = cpr->cp_raw_cons;
2286         u32 rx_pkts = 0;
2287         u8 event = 0;
2288
2289         while (1) {
2290                 int rc;
2291
2292                 cp_cons = RING_CMP(raw_cons);
2293                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2294
2295                 if (!TX_CMP_VALID(txcmp, raw_cons))
2296                         break;
2297
2298                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2299                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2300                         cp_cons = RING_CMP(tmp_raw_cons);
2301                         rxcmp1 = (struct rx_cmp_ext *)
2302                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2303
2304                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2305                                 break;
2306
2307                         /* force an error to recycle the buffer */
2308                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2309                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2310
2311                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2312                         if (likely(rc == -EIO) && budget)
2313                                 rx_pkts++;
2314                         else if (rc == -EBUSY)  /* partial completion */
2315                                 break;
2316                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2317                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2318                         bnxt_hwrm_handler(bp, txcmp);
2319                 } else {
2320                         netdev_err(bp->dev,
2321                                    "Invalid completion received on special ring\n");
2322                 }
2323                 raw_cons = NEXT_RAW_CMP(raw_cons);
2324
2325                 if (rx_pkts == budget)
2326                         break;
2327         }
2328
2329         cpr->cp_raw_cons = raw_cons;
2330         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2331         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2332
2333         if (event & BNXT_AGG_EVENT)
2334                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2335
2336         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2337                 napi_complete_done(napi, rx_pkts);
2338                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2339         }
2340         return rx_pkts;
2341 }
2342
2343 static int bnxt_poll(struct napi_struct *napi, int budget)
2344 {
2345         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2346         struct bnxt *bp = bnapi->bp;
2347         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2348         int work_done = 0;
2349
2350         while (1) {
2351                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2352
2353                 if (work_done >= budget) {
2354                         if (!budget)
2355                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2356                         break;
2357                 }
2358
2359                 if (!bnxt_has_work(bp, cpr)) {
2360                         if (napi_complete_done(napi, work_done))
2361                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2362                         break;
2363                 }
2364         }
2365         if (bp->flags & BNXT_FLAG_DIM) {
2366                 struct dim_sample dim_sample = {};
2367
2368                 dim_update_sample(cpr->event_ctr,
2369                                   cpr->rx_packets,
2370                                   cpr->rx_bytes,
2371                                   &dim_sample);
2372                 net_dim(&cpr->dim, dim_sample);
2373         }
2374         return work_done;
2375 }
2376
2377 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2378 {
2379         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2380         int i, work_done = 0;
2381
2382         for (i = 0; i < 2; i++) {
2383                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2384
2385                 if (cpr2) {
2386                         work_done += __bnxt_poll_work(bp, cpr2,
2387                                                       budget - work_done);
2388                         cpr->has_more_work |= cpr2->has_more_work;
2389                 }
2390         }
2391         return work_done;
2392 }
2393
2394 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2395                                  u64 dbr_type)
2396 {
2397         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2398         int i;
2399
2400         for (i = 0; i < 2; i++) {
2401                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2402                 struct bnxt_db_info *db;
2403
2404                 if (cpr2 && cpr2->had_work_done) {
2405                         db = &cpr2->cp_db;
2406                         writeq(db->db_key64 | dbr_type |
2407                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2408                         cpr2->had_work_done = 0;
2409                 }
2410         }
2411         __bnxt_poll_work_done(bp, bnapi);
2412 }
2413
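/* NAPI poll handler for P5 chips: drain the notification queue (NQ) and
 * poll the completion ring referenced by each CQ notification, re-arming
 * the rings only when all work is done within the budget.
 */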
2414 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2415 {
2416         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2417         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2418         u32 raw_cons = cpr->cp_raw_cons;
2419         struct bnxt *bp = bnapi->bp;
2420         struct nqe_cn *nqcmp;
2421         int work_done = 0;
2422         u32 cons;
2423
2424         if (cpr->has_more_work) {
2425                 cpr->has_more_work = 0;
2426                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2427         }
2428         while (1) {
2429                 cons = RING_CMP(raw_cons);
2430                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2431
2432                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2433                         if (cpr->has_more_work)
2434                                 break;
2435
2436                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2437                         cpr->cp_raw_cons = raw_cons;
2438                         if (napi_complete_done(napi, work_done))
2439                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2440                                                   cpr->cp_raw_cons);
2441                         return work_done;
2442                 }
2443
2444                 /* The valid test of the entry must be done before
2445                  * reading any further.
2446                  */
2447                 dma_rmb();
2448
2449                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2450                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2451                         struct bnxt_cp_ring_info *cpr2;
2452
2453                         cpr2 = cpr->cp_ring_arr[idx];
2454                         work_done += __bnxt_poll_work(bp, cpr2,
2455                                                       budget - work_done);
2456                         cpr->has_more_work |= cpr2->has_more_work;
2457                 } else {
2458                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2459                 }
2460                 raw_cons = NEXT_RAW_CMP(raw_cons);
2461         }
2462         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2463         if (raw_cons != cpr->cp_raw_cons) {
2464                 cpr->cp_raw_cons = raw_cons;
2465                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2466         }
2467         return work_done;
2468 }
2469
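/* Unmap and free all pending TX buffers (SKBs and XDP frames) on every
 * TX ring and reset the corresponding netdev TX queues.
 */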
2470 static void bnxt_free_tx_skbs(struct bnxt *bp)
2471 {
2472         int i, max_idx;
2473         struct pci_dev *pdev = bp->pdev;
2474
2475         if (!bp->tx_ring)
2476                 return;
2477
2478         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2479         for (i = 0; i < bp->tx_nr_rings; i++) {
2480                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2481                 int j;
2482
2483                 for (j = 0; j < max_idx;) {
2484                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2485                         struct sk_buff *skb;
2486                         int k, last;
2487
2488                         if (i < bp->tx_nr_rings_xdp &&
2489                             tx_buf->action == XDP_REDIRECT) {
2490                                 dma_unmap_single(&pdev->dev,
2491                                         dma_unmap_addr(tx_buf, mapping),
2492                                         dma_unmap_len(tx_buf, len),
2493                                         PCI_DMA_TODEVICE);
2494                                 xdp_return_frame(tx_buf->xdpf);
2495                                 tx_buf->action = 0;
2496                                 tx_buf->xdpf = NULL;
2497                                 j++;
2498                                 continue;
2499                         }
2500
2501                         skb = tx_buf->skb;
2502                         if (!skb) {
2503                                 j++;
2504                                 continue;
2505                         }
2506
2507                         tx_buf->skb = NULL;
2508
2509                         if (tx_buf->is_push) {
2510                                 dev_kfree_skb(skb);
2511                                 j += 2;
2512                                 continue;
2513                         }
2514
2515                         dma_unmap_single(&pdev->dev,
2516                                          dma_unmap_addr(tx_buf, mapping),
2517                                          skb_headlen(skb),
2518                                          PCI_DMA_TODEVICE);
2519
2520                         last = tx_buf->nr_frags;
2521                         j += 2;
2522                         for (k = 0; k < last; k++, j++) {
2523                                 int ring_idx = j & bp->tx_ring_mask;
2524                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2525
2526                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2527                                 dma_unmap_page(
2528                                         &pdev->dev,
2529                                         dma_unmap_addr(tx_buf, mapping),
2530                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2531                         }
2532                         dev_kfree_skb(skb);
2533                 }
2534                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2535         }
2536 }
2537
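/* Unmap and free all RX, TPA and aggregation buffers on every RX ring. */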
2538 static void bnxt_free_rx_skbs(struct bnxt *bp)
2539 {
2540         int i, max_idx, max_agg_idx;
2541         struct pci_dev *pdev = bp->pdev;
2542
2543         if (!bp->rx_ring)
2544                 return;
2545
2546         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2547         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2548         for (i = 0; i < bp->rx_nr_rings; i++) {
2549                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2550                 struct bnxt_tpa_idx_map *map;
2551                 int j;
2552
2553                 if (rxr->rx_tpa) {
2554                         for (j = 0; j < bp->max_tpa; j++) {
2555                                 struct bnxt_tpa_info *tpa_info =
2556                                                         &rxr->rx_tpa[j];
2557                                 u8 *data = tpa_info->data;
2558
2559                                 if (!data)
2560                                         continue;
2561
2562                                 dma_unmap_single_attrs(&pdev->dev,
2563                                                        tpa_info->mapping,
2564                                                        bp->rx_buf_use_size,
2565                                                        bp->rx_dir,
2566                                                        DMA_ATTR_WEAK_ORDERING);
2567
2568                                 tpa_info->data = NULL;
2569
2570                                 kfree(data);
2571                         }
2572                 }
2573
2574                 for (j = 0; j < max_idx; j++) {
2575                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2576                         dma_addr_t mapping = rx_buf->mapping;
2577                         void *data = rx_buf->data;
2578
2579                         if (!data)
2580                                 continue;
2581
2582                         rx_buf->data = NULL;
2583
2584                         if (BNXT_RX_PAGE_MODE(bp)) {
2585                                 mapping -= bp->rx_dma_offset;
2586                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2587                                                      PAGE_SIZE, bp->rx_dir,
2588                                                      DMA_ATTR_WEAK_ORDERING);
2589                                 page_pool_recycle_direct(rxr->page_pool, data);
2590                         } else {
2591                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2592                                                        bp->rx_buf_use_size,
2593                                                        bp->rx_dir,
2594                                                        DMA_ATTR_WEAK_ORDERING);
2595                                 kfree(data);
2596                         }
2597                 }
2598
2599                 for (j = 0; j < max_agg_idx; j++) {
2600                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2601                                 &rxr->rx_agg_ring[j];
2602                         struct page *page = rx_agg_buf->page;
2603
2604                         if (!page)
2605                                 continue;
2606
2607                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2608                                              BNXT_RX_PAGE_SIZE,
2609                                              PCI_DMA_FROMDEVICE,
2610                                              DMA_ATTR_WEAK_ORDERING);
2611
2612                         rx_agg_buf->page = NULL;
2613                         __clear_bit(j, rxr->rx_agg_bmap);
2614
2615                         __free_page(page);
2616                 }
2617                 if (rxr->rx_page) {
2618                         __free_page(rxr->rx_page);
2619                         rxr->rx_page = NULL;
2620                 }
2621                 map = rxr->rx_tpa_idx_map;
2622                 if (map)
2623                         memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2624         }
2625 }
2626
2627 static void bnxt_free_skbs(struct bnxt *bp)
2628 {
2629         bnxt_free_tx_skbs(bp);
2630         bnxt_free_rx_skbs(bp);
2631 }
2632
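/* Free the DMA-coherent pages, the optional page table and the vmem
 * area backing a ring described by bnxt_ring_mem_info.
 */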
2633 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2634 {
2635         struct pci_dev *pdev = bp->pdev;
2636         int i;
2637
2638         for (i = 0; i < rmem->nr_pages; i++) {
2639                 if (!rmem->pg_arr[i])
2640                         continue;
2641
2642                 dma_free_coherent(&pdev->dev, rmem->page_size,
2643                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2644
2645                 rmem->pg_arr[i] = NULL;
2646         }
2647         if (rmem->pg_tbl) {
2648                 size_t pg_tbl_size = rmem->nr_pages * 8;
2649
2650                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2651                         pg_tbl_size = rmem->page_size;
2652                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2653                                   rmem->pg_tbl, rmem->pg_tbl_map);
2654                 rmem->pg_tbl = NULL;
2655         }
2656         if (rmem->vmem_size && *rmem->vmem) {
2657                 vfree(*rmem->vmem);
2658                 *rmem->vmem = NULL;
2659         }
2660 }
2661
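/* Allocate the DMA-coherent pages backing a ring.  Multi-page and
 * multi-level rings also get a page table; PTU valid/next-to-last/last
 * bits are set in the table entries when the ring uses PTE-style
 * entries.
 */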
2662 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2663 {
2664         struct pci_dev *pdev = bp->pdev;
2665         u64 valid_bit = 0;
2666         int i;
2667
2668         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2669                 valid_bit = PTU_PTE_VALID;
2670         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2671                 size_t pg_tbl_size = rmem->nr_pages * 8;
2672
2673                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2674                         pg_tbl_size = rmem->page_size;
2675                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2676                                                   &rmem->pg_tbl_map,
2677                                                   GFP_KERNEL);
2678                 if (!rmem->pg_tbl)
2679                         return -ENOMEM;
2680         }
2681
2682         for (i = 0; i < rmem->nr_pages; i++) {
2683                 u64 extra_bits = valid_bit;
2684
2685                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2686                                                      rmem->page_size,
2687                                                      &rmem->dma_arr[i],
2688                                                      GFP_KERNEL);
2689                 if (!rmem->pg_arr[i])
2690                         return -ENOMEM;
2691
2692                 if (rmem->init_val)
2693                         memset(rmem->pg_arr[i], rmem->init_val,
2694                                rmem->page_size);
2695                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2696                         if (i == rmem->nr_pages - 2 &&
2697                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2698                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2699                         else if (i == rmem->nr_pages - 1 &&
2700                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2701                                 extra_bits |= PTU_PTE_LAST;
2702                         rmem->pg_tbl[i] =
2703                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2704                 }
2705         }
2706
2707         if (rmem->vmem_size) {
2708                 *rmem->vmem = vzalloc(rmem->vmem_size);
2709                 if (!(*rmem->vmem))
2710                         return -ENOMEM;
2711         }
2712         return 0;
2713 }
2714
2715 static void bnxt_free_tpa_info(struct bnxt *bp)
2716 {
2717         int i;
2718
2719         for (i = 0; i < bp->rx_nr_rings; i++) {
2720                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2721
2722                 kfree(rxr->rx_tpa_idx_map);
2723                 rxr->rx_tpa_idx_map = NULL;
2724                 if (rxr->rx_tpa) {
2725                         kfree(rxr->rx_tpa[0].agg_arr);
2726                         rxr->rx_tpa[0].agg_arr = NULL;
2727                 }
2728                 kfree(rxr->rx_tpa);
2729                 rxr->rx_tpa = NULL;
2730         }
2731 }
2732
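/* Allocate per-ring TPA (LRO/GRO_HW) state.  On P5 chips the number of
 * TPA slots comes from firmware (max_tpa_v2, but at least MAX_TPA_P5);
 * one aggregation array is allocated per RX ring and carved into
 * MAX_SKB_FRAGS-sized slices, one per TPA slot, together with the agg
 * completion index map.
 */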
2733 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2734 {
2735         int i, j, total_aggs = 0;
2736
2737         bp->max_tpa = MAX_TPA;
2738         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2739                 if (!bp->max_tpa_v2)
2740                         return 0;
2741                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2742                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2743         }
2744
2745         for (i = 0; i < bp->rx_nr_rings; i++) {
2746                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2747                 struct rx_agg_cmp *agg;
2748
2749                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2750                                       GFP_KERNEL);
2751                 if (!rxr->rx_tpa)
2752                         return -ENOMEM;
2753
2754                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2755                         continue;
2756                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2757                 rxr->rx_tpa[0].agg_arr = agg;
2758                 if (!agg)
2759                         return -ENOMEM;
2760                 for (j = 1; j < bp->max_tpa; j++)
2761                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2762                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2763                                               GFP_KERNEL);
2764                 if (!rxr->rx_tpa_idx_map)
2765                         return -ENOMEM;
2766         }
2767         return 0;
2768 }
2769
2770 static void bnxt_free_rx_rings(struct bnxt *bp)
2771 {
2772         int i;
2773
2774         if (!bp->rx_ring)
2775                 return;
2776
2777         bnxt_free_tpa_info(bp);
2778         for (i = 0; i < bp->rx_nr_rings; i++) {
2779                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2780                 struct bnxt_ring_struct *ring;
2781
2782                 if (rxr->xdp_prog)
2783                         bpf_prog_put(rxr->xdp_prog);
2784
2785                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2786                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2787
2788                 page_pool_destroy(rxr->page_pool);
2789                 rxr->page_pool = NULL;
2790
2791                 kfree(rxr->rx_agg_bmap);
2792                 rxr->rx_agg_bmap = NULL;
2793
2794                 ring = &rxr->rx_ring_struct;
2795                 bnxt_free_ring(bp, &ring->ring_mem);
2796
2797                 ring = &rxr->rx_agg_ring_struct;
2798                 bnxt_free_ring(bp, &ring->ring_mem);
2799         }
2800 }
2801
2802 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2803                                    struct bnxt_rx_ring_info *rxr)
2804 {
2805         struct page_pool_params pp = { 0 };
2806
2807         pp.pool_size = bp->rx_ring_size;
2808         pp.nid = dev_to_node(&bp->pdev->dev);
2809         pp.dev = &bp->pdev->dev;
2810         pp.dma_dir = DMA_BIDIRECTIONAL;
2811
2812         rxr->page_pool = page_pool_create(&pp);
2813         if (IS_ERR(rxr->page_pool)) {
2814                 int err = PTR_ERR(rxr->page_pool);
2815
2816                 rxr->page_pool = NULL;
2817                 return err;
2818         }
2819         return 0;
2820 }
2821
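/* Allocate per-RX-ring resources: a page_pool and XDP rxq registration,
 * the RX descriptor ring itself and, when aggregation rings are enabled,
 * the aggregation ring plus its free-slot bitmap.  TPA state is
 * allocated last when the TPA flag is set.
 */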
2822 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2823 {
2824         int i, rc = 0, agg_rings = 0;
2825
2826         if (!bp->rx_ring)
2827                 return -ENOMEM;
2828
2829         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2830                 agg_rings = 1;
2831
2832         for (i = 0; i < bp->rx_nr_rings; i++) {
2833                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2834                 struct bnxt_ring_struct *ring;
2835
2836                 ring = &rxr->rx_ring_struct;
2837
2838                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2839                 if (rc)
2840                         return rc;
2841
2842                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2843                 if (rc < 0)
2844                         return rc;
2845
2846                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2847                                                 MEM_TYPE_PAGE_POOL,
2848                                                 rxr->page_pool);
2849                 if (rc) {
2850                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2851                         return rc;
2852                 }
2853
2854                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2855                 if (rc)
2856                         return rc;
2857
2858                 ring->grp_idx = i;
2859                 if (agg_rings) {
2860                         u16 mem_size;
2861
2862                         ring = &rxr->rx_agg_ring_struct;
2863                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2864                         if (rc)
2865                                 return rc;
2866
2867                         ring->grp_idx = i;
2868                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2869                         mem_size = rxr->rx_agg_bmap_size / 8;
2870                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2871                         if (!rxr->rx_agg_bmap)
2872                                 return -ENOMEM;
2873                 }
2874         }
2875         if (bp->flags & BNXT_FLAG_TPA)
2876                 rc = bnxt_alloc_tpa_info(bp);
2877         return rc;
2878 }
2879
2880 static void bnxt_free_tx_rings(struct bnxt *bp)
2881 {
2882         int i;
2883         struct pci_dev *pdev = bp->pdev;
2884
2885         if (!bp->tx_ring)
2886                 return;
2887
2888         for (i = 0; i < bp->tx_nr_rings; i++) {
2889                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2890                 struct bnxt_ring_struct *ring;
2891
2892                 if (txr->tx_push) {
2893                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2894                                           txr->tx_push, txr->tx_push_mapping);
2895                         txr->tx_push = NULL;
2896                 }
2897
2898                 ring = &txr->tx_ring_struct;
2899
2900                 bnxt_free_ring(bp, &ring->ring_mem);
2901         }
2902 }
2903
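/* Allocate TX ring memory.  When TX push is enabled, a small coherent
 * buffer (tx_push) is also allocated per ring as DMA-able backing for
 * the pushed BDs and data; push is disabled if the aligned size would
 * exceed 256 bytes.
 */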
2904 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2905 {
2906         int i, j, rc;
2907         struct pci_dev *pdev = bp->pdev;
2908
2909         bp->tx_push_size = 0;
2910         if (bp->tx_push_thresh) {
2911                 int push_size;
2912
2913                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2914                                         bp->tx_push_thresh);
2915
2916                 if (push_size > 256) {
2917                         push_size = 0;
2918                         bp->tx_push_thresh = 0;
2919                 }
2920
2921                 bp->tx_push_size = push_size;
2922         }
2923
2924         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2925                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2926                 struct bnxt_ring_struct *ring;
2927                 u8 qidx;
2928
2929                 ring = &txr->tx_ring_struct;
2930
2931                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2932                 if (rc)
2933                         return rc;
2934
2935                 ring->grp_idx = txr->bnapi->index;
2936                 if (bp->tx_push_size) {
2937                         dma_addr_t mapping;
2938
2939                         /* One pre-allocated DMA buffer to back up
2940                          * the TX push operation
2941                          */
2942                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2943                                                 bp->tx_push_size,
2944                                                 &txr->tx_push_mapping,
2945                                                 GFP_KERNEL);
2946
2947                         if (!txr->tx_push)
2948                                 return -ENOMEM;
2949
2950                         mapping = txr->tx_push_mapping +
2951                                 sizeof(struct tx_push_bd);
2952                         txr->data_mapping = cpu_to_le64(mapping);
2953                 }
2954                 qidx = bp->tc_to_qidx[j];
2955                 ring->queue_id = bp->q_info[qidx].queue_id;
2956                 if (i < bp->tx_nr_rings_xdp)
2957                         continue;
2958                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2959                         j++;
2960         }
2961         return 0;
2962 }
2963
2964 static void bnxt_free_cp_rings(struct bnxt *bp)
2965 {
2966         int i;
2967
2968         if (!bp->bnapi)
2969                 return;
2970
2971         for (i = 0; i < bp->cp_nr_rings; i++) {
2972                 struct bnxt_napi *bnapi = bp->bnapi[i];
2973                 struct bnxt_cp_ring_info *cpr;
2974                 struct bnxt_ring_struct *ring;
2975                 int j;
2976
2977                 if (!bnapi)
2978                         continue;
2979
2980                 cpr = &bnapi->cp_ring;
2981                 ring = &cpr->cp_ring_struct;
2982
2983                 bnxt_free_ring(bp, &ring->ring_mem);
2984
2985                 for (j = 0; j < 2; j++) {
2986                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2987
2988                         if (cpr2) {
2989                                 ring = &cpr2->cp_ring_struct;
2990                                 bnxt_free_ring(bp, &ring->ring_mem);
2991                                 kfree(cpr2);
2992                                 cpr->cp_ring_arr[j] = NULL;
2993                         }
2994                 }
2995         }
2996 }
2997
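/* Allocate one completion sub-ring.  P5 chips hang separate RX and TX
 * completion rings off each NQ via cp_ring_arr[].
 */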
2998 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2999 {
3000         struct bnxt_ring_mem_info *rmem;
3001         struct bnxt_ring_struct *ring;
3002         struct bnxt_cp_ring_info *cpr;
3003         int rc;
3004
3005         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3006         if (!cpr)
3007                 return NULL;
3008
3009         ring = &cpr->cp_ring_struct;
3010         rmem = &ring->ring_mem;
3011         rmem->nr_pages = bp->cp_nr_pages;
3012         rmem->page_size = HW_CMPD_RING_SIZE;
3013         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3014         rmem->dma_arr = cpr->cp_desc_mapping;
3015         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3016         rc = bnxt_alloc_ring(bp, rmem);
3017         if (rc) {
3018                 bnxt_free_ring(bp, rmem);
3019                 kfree(cpr);
3020                 cpr = NULL;
3021         }
3022         return cpr;
3023 }
3024
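/* Allocate the per-NAPI completion/notification rings.  The MSI-X map
 * index skips the vectors reserved for the ULP (RDMA) driver.  On P5
 * chips, RX and TX completion sub-rings are attached under BNXT_RX_HDL
 * and BNXT_TX_HDL according to whether rings are shared.
 */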
3025 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3026 {
3027         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3028         int i, rc, ulp_base_vec, ulp_msix;
3029
3030         ulp_msix = bnxt_get_ulp_msix_num(bp);
3031         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3032         for (i = 0; i < bp->cp_nr_rings; i++) {
3033                 struct bnxt_napi *bnapi = bp->bnapi[i];
3034                 struct bnxt_cp_ring_info *cpr;
3035                 struct bnxt_ring_struct *ring;
3036
3037                 if (!bnapi)
3038                         continue;
3039
3040                 cpr = &bnapi->cp_ring;
3041                 cpr->bnapi = bnapi;
3042                 ring = &cpr->cp_ring_struct;
3043
3044                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3045                 if (rc)
3046                         return rc;
3047
3048                 if (ulp_msix && i >= ulp_base_vec)
3049                         ring->map_idx = i + ulp_msix;
3050                 else
3051                         ring->map_idx = i;
3052
3053                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3054                         continue;
3055
3056                 if (i < bp->rx_nr_rings) {
3057                         struct bnxt_cp_ring_info *cpr2 =
3058                                 bnxt_alloc_cp_sub_ring(bp);
3059
3060                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3061                         if (!cpr2)
3062                                 return -ENOMEM;
3063                         cpr2->bnapi = bnapi;
3064                 }
3065                 if ((sh && i < bp->tx_nr_rings) ||
3066                     (!sh && i >= bp->rx_nr_rings)) {
3067                         struct bnxt_cp_ring_info *cpr2 =
3068                                 bnxt_alloc_cp_sub_ring(bp);
3069
3070                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3071                         if (!cpr2)
3072                                 return -ENOMEM;
3073                         cpr2->bnapi = bnapi;
3074                 }
3075         }
3076         return 0;
3077 }
3078
3079 static void bnxt_init_ring_struct(struct bnxt *bp)
3080 {
3081         int i;
3082
3083         for (i = 0; i < bp->cp_nr_rings; i++) {
3084                 struct bnxt_napi *bnapi = bp->bnapi[i];
3085                 struct bnxt_ring_mem_info *rmem;
3086                 struct bnxt_cp_ring_info *cpr;
3087                 struct bnxt_rx_ring_info *rxr;
3088                 struct bnxt_tx_ring_info *txr;
3089                 struct bnxt_ring_struct *ring;
3090
3091                 if (!bnapi)
3092                         continue;
3093
3094                 cpr = &bnapi->cp_ring;
3095                 ring = &cpr->cp_ring_struct;
3096                 rmem = &ring->ring_mem;
3097                 rmem->nr_pages = bp->cp_nr_pages;
3098                 rmem->page_size = HW_CMPD_RING_SIZE;
3099                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3100                 rmem->dma_arr = cpr->cp_desc_mapping;
3101                 rmem->vmem_size = 0;
3102
3103                 rxr = bnapi->rx_ring;
3104                 if (!rxr)
3105                         goto skip_rx;
3106
3107                 ring = &rxr->rx_ring_struct;
3108                 rmem = &ring->ring_mem;
3109                 rmem->nr_pages = bp->rx_nr_pages;
3110                 rmem->page_size = HW_RXBD_RING_SIZE;
3111                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3112                 rmem->dma_arr = rxr->rx_desc_mapping;
3113                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3114                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3115
3116                 ring = &rxr->rx_agg_ring_struct;
3117                 rmem = &ring->ring_mem;
3118                 rmem->nr_pages = bp->rx_agg_nr_pages;
3119                 rmem->page_size = HW_RXBD_RING_SIZE;
3120                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3121                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3122                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3123                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3124
3125 skip_rx:
3126                 txr = bnapi->tx_ring;
3127                 if (!txr)
3128                         continue;
3129
3130                 ring = &txr->tx_ring_struct;
3131                 rmem = &ring->ring_mem;
3132                 rmem->nr_pages = bp->tx_nr_pages;
3133                 rmem->page_size = HW_RXBD_RING_SIZE;
3134                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3135                 rmem->dma_arr = txr->tx_desc_mapping;
3136                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3137                 rmem->vmem = (void **)&txr->tx_buf_ring;
3138         }
3139 }
3140
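/* Stamp every RX BD in the ring with the given length/type flags and
 * record the ring-wide producer index in the opaque field.
 */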
3141 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3142 {
3143         int i;
3144         u32 prod;
3145         struct rx_bd **rx_buf_ring;
3146
3147         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3148         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3149                 int j;
3150                 struct rx_bd *rxbd;
3151
3152                 rxbd = rx_buf_ring[i];
3153                 if (!rxbd)
3154                         continue;
3155
3156                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3157                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3158                         rxbd->rx_bd_opaque = prod;
3159                 }
3160         }
3161 }
3162
3163 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3164 {
3165         struct net_device *dev = bp->dev;
3166         struct bnxt_rx_ring_info *rxr;
3167         struct bnxt_ring_struct *ring;
3168         u32 prod, type;
3169         int i;
3170
3171         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3172                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3173
3174         if (NET_IP_ALIGN == 2)
3175                 type |= RX_BD_FLAGS_SOP;
3176
3177         rxr = &bp->rx_ring[ring_nr];
3178         ring = &rxr->rx_ring_struct;
3179         bnxt_init_rxbd_pages(ring, type);
3180
3181         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3182                 bpf_prog_add(bp->xdp_prog, 1);
3183                 rxr->xdp_prog = bp->xdp_prog;
3184         }
3185         prod = rxr->rx_prod;
3186         for (i = 0; i < bp->rx_ring_size; i++) {
3187                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3188                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3189                                     ring_nr, i, bp->rx_ring_size);
3190                         break;
3191                 }
3192                 prod = NEXT_RX(prod);
3193         }
3194         rxr->rx_prod = prod;
3195         ring->fw_ring_id = INVALID_HW_RING_ID;
3196
3197         ring = &rxr->rx_agg_ring_struct;
3198         ring->fw_ring_id = INVALID_HW_RING_ID;
3199
3200         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3201                 return 0;
3202
3203         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3204                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3205
3206         bnxt_init_rxbd_pages(ring, type);
3207
3208         prod = rxr->rx_agg_prod;
3209         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3210                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3211                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3212                                     ring_nr, i, bp->rx_agg_ring_size);
3213                         break;
3214                 }
3215                 prod = NEXT_RX_AGG(prod);
3216         }
3217         rxr->rx_agg_prod = prod;
3218
3219         if (bp->flags & BNXT_FLAG_TPA) {
3220                 if (rxr->rx_tpa) {
3221                         u8 *data;
3222                         dma_addr_t mapping;
3223
3224                         for (i = 0; i < bp->max_tpa; i++) {
3225                                 data = __bnxt_alloc_rx_data(bp, &mapping,
3226                                                             GFP_KERNEL);
3227                                 if (!data)
3228                                         return -ENOMEM;
3229
3230                                 rxr->rx_tpa[i].data = data;
3231                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3232                                 rxr->rx_tpa[i].mapping = mapping;
3233                         }
3234                 } else {
3235                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3236                         return -ENOMEM;
3237                 }
3238         }
3239
3240         return 0;
3241 }
3242
3243 static void bnxt_init_cp_rings(struct bnxt *bp)
3244 {
3245         int i, j;
3246
3247         for (i = 0; i < bp->cp_nr_rings; i++) {
3248                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3249                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3250
3251                 ring->fw_ring_id = INVALID_HW_RING_ID;
3252                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3253                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3254                 for (j = 0; j < 2; j++) {
3255                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3256
3257                         if (!cpr2)
3258                                 continue;
3259
3260                         ring = &cpr2->cp_ring_struct;
3261                         ring->fw_ring_id = INVALID_HW_RING_ID;
3262                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3263                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3264                 }
3265         }
3266 }
3267
3268 static int bnxt_init_rx_rings(struct bnxt *bp)
3269 {
3270         int i, rc = 0;
3271
3272         if (BNXT_RX_PAGE_MODE(bp)) {
3273                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3274                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3275         } else {
3276                 bp->rx_offset = BNXT_RX_OFFSET;
3277                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3278         }
3279
3280         for (i = 0; i < bp->rx_nr_rings; i++) {
3281                 rc = bnxt_init_one_rx_ring(bp, i);
3282                 if (rc)
3283                         break;
3284         }
3285
3286         return rc;
3287 }
3288
3289 static int bnxt_init_tx_rings(struct bnxt *bp)
3290 {
3291         u16 i;
3292
3293         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3294                                    MAX_SKB_FRAGS + 1);
3295
3296         for (i = 0; i < bp->tx_nr_rings; i++) {
3297                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3298                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3299
3300                 ring->fw_ring_id = INVALID_HW_RING_ID;
3301         }
3302
3303         return 0;
3304 }
3305
3306 static void bnxt_free_ring_grps(struct bnxt *bp)
3307 {
3308         kfree(bp->grp_info);
3309         bp->grp_info = NULL;
3310 }
3311
3312 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3313 {
3314         int i;
3315
3316         if (irq_re_init) {
3317                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3318                                        sizeof(struct bnxt_ring_grp_info),
3319                                        GFP_KERNEL);
3320                 if (!bp->grp_info)
3321                         return -ENOMEM;
3322         }
3323         for (i = 0; i < bp->cp_nr_rings; i++) {
3324                 if (irq_re_init)
3325                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3326                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3327                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3328                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3329                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3330         }
3331         return 0;
3332 }
3333
3334 static void bnxt_free_vnics(struct bnxt *bp)
3335 {
3336         kfree(bp->vnic_info);
3337         bp->vnic_info = NULL;
3338         bp->nr_vnics = 0;
3339 }
3340
3341 static int bnxt_alloc_vnics(struct bnxt *bp)
3342 {
3343         int num_vnics = 1;
3344
3345 #ifdef CONFIG_RFS_ACCEL
3346         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3347                 num_vnics += bp->rx_nr_rings;
3348 #endif
3349
3350         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3351                 num_vnics++;
3352
3353         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3354                                 GFP_KERNEL);
3355         if (!bp->vnic_info)
3356                 return -ENOMEM;
3357
3358         bp->nr_vnics = num_vnics;
3359         return 0;
3360 }
3361
3362 static void bnxt_init_vnics(struct bnxt *bp)
3363 {
3364         int i;
3365
3366         for (i = 0; i < bp->nr_vnics; i++) {
3367                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3368                 int j;
3369
3370                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3371                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3372                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3373
3374                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3375
3376                 if (bp->vnic_info[i].rss_hash_key) {
3377                         if (i == 0)
3378                                 prandom_bytes(vnic->rss_hash_key,
3379                                               HW_HASH_KEY_SIZE);
3380                         else
3381                                 memcpy(vnic->rss_hash_key,
3382                                        bp->vnic_info[0].rss_hash_key,
3383                                        HW_HASH_KEY_SIZE);
3384                 }
3385         }
3386 }
3387
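/* Convert a ring size in descriptors to a number of descriptor pages.
 * One page is added and the count is rounded up to the next power of
 * two so that (nr_pages * DESC_CNT - 1) can serve as an index mask.
 * For example, ring_size = 1024 with 512 descriptors per page gives
 * 2 + 1 = 3, rounded up to 4 pages.
 */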
3388 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3389 {
3390         int pages;
3391
3392         pages = ring_size / desc_per_pg;
3393
3394         if (!pages)
3395                 return 1;
3396
3397         pages++;
3398
3399         while (pages & (pages - 1))
3400                 pages++;
3401
3402         return pages;
3403 }
3404
3405 void bnxt_set_tpa_flags(struct bnxt *bp)
3406 {
3407         bp->flags &= ~BNXT_FLAG_TPA;
3408         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3409                 return;
3410         if (bp->dev->features & NETIF_F_LRO)
3411                 bp->flags |= BNXT_FLAG_LRO;
3412         else if (bp->dev->features & NETIF_F_GRO_HW)
3413                 bp->flags |= BNXT_FLAG_GRO;
3414 }
3415
3416 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3417  * be set on entry.
3418  */
3419 void bnxt_set_ring_params(struct bnxt *bp)
3420 {
3421         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3422         u32 agg_factor = 0, agg_ring_size = 0;
3423
3424         /* 8 for CRC and VLAN */
3425         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3426
3427         rx_space = rx_size + NET_SKB_PAD +
3428                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3429
3430         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3431         ring_size = bp->rx_ring_size;
3432         bp->rx_agg_ring_size = 0;
3433         bp->rx_agg_nr_pages = 0;
3434
3435         if (bp->flags & BNXT_FLAG_TPA)
3436                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3437
3438         bp->flags &= ~BNXT_FLAG_JUMBO;
3439         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3440                 u32 jumbo_factor;
3441
3442                 bp->flags |= BNXT_FLAG_JUMBO;
3443                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3444                 if (jumbo_factor > agg_factor)
3445                         agg_factor = jumbo_factor;
3446         }
3447         agg_ring_size = ring_size * agg_factor;
3448
3449         if (agg_ring_size) {
3450                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3451                                                         RX_DESC_CNT);
3452                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3453                         u32 tmp = agg_ring_size;
3454
3455                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3456                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3457                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3458                                     tmp, agg_ring_size);
3459                 }
3460                 bp->rx_agg_ring_size = agg_ring_size;
3461                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3462                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3463                 rx_space = rx_size + NET_SKB_PAD +
3464                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3465         }
3466
3467         bp->rx_buf_use_size = rx_size;
3468         bp->rx_buf_size = rx_space;
3469
3470         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3471         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3472
3473         ring_size = bp->tx_ring_size;
3474         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3475         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3476
3477         max_rx_cmpl = bp->rx_ring_size;
3478         /* MAX TPA needs to be added because TPA_START completions are
3479          * immediately recycled, so the TPA completions are not bound by
3480          * the RX ring size.
3481          */
3482         if (bp->flags & BNXT_FLAG_TPA)
3483                 max_rx_cmpl += bp->max_tpa;
3484         /* RX and TPA completions are 32-byte, all others are 16-byte */
3485         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3486         bp->cp_ring_size = ring_size;
3487
3488         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3489         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3490                 bp->cp_nr_pages = MAX_CP_PAGES;
3491                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3492                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3493                             ring_size, bp->cp_ring_size);
3494         }
3495         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3496         bp->cp_ring_mask = bp->cp_bit - 1;
3497 }
3498
3499 /* Changing allocation mode of RX rings.
3500  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3501  */
3502 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3503 {
3504         if (page_mode) {
3505                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3506                         return -EOPNOTSUPP;
3507                 bp->dev->max_mtu =
3508                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3509                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3510                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3511                 bp->rx_dir = DMA_BIDIRECTIONAL;
3512                 bp->rx_skb_func = bnxt_rx_page_skb;
3513                 /* Disable LRO or GRO_HW */
3514                 netdev_update_features(bp->dev);
3515         } else {
3516                 bp->dev->max_mtu = bp->max_mtu;
3517                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3518                 bp->rx_dir = DMA_FROM_DEVICE;
3519                 bp->rx_skb_func = bnxt_rx_skb;
3520         }
3521         return 0;
3522 }
3523
3524 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3525 {
3526         int i;
3527         struct bnxt_vnic_info *vnic;
3528         struct pci_dev *pdev = bp->pdev;
3529
3530         if (!bp->vnic_info)
3531                 return;
3532
3533         for (i = 0; i < bp->nr_vnics; i++) {
3534                 vnic = &bp->vnic_info[i];
3535
3536                 kfree(vnic->fw_grp_ids);
3537                 vnic->fw_grp_ids = NULL;
3538
3539                 kfree(vnic->uc_list);
3540                 vnic->uc_list = NULL;
3541
3542                 if (vnic->mc_list) {
3543                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3544                                           vnic->mc_list, vnic->mc_list_mapping);
3545                         vnic->mc_list = NULL;
3546                 }
3547
3548                 if (vnic->rss_table) {
3549                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3550                                           vnic->rss_table,
3551                                           vnic->rss_table_dma_addr);
3552                         vnic->rss_table = NULL;
3553                 }
3554
3555                 vnic->rss_hash_key = NULL;
3556                 vnic->flags = 0;
3557         }
3558 }
3559
3560 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3561 {
3562         int i, rc = 0, size;
3563         struct bnxt_vnic_info *vnic;
3564         struct pci_dev *pdev = bp->pdev;
3565         int max_rings;
3566
3567         for (i = 0; i < bp->nr_vnics; i++) {
3568                 vnic = &bp->vnic_info[i];
3569
3570                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3571                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3572
3573                         if (mem_size > 0) {
3574                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3575                                 if (!vnic->uc_list) {
3576                                         rc = -ENOMEM;
3577                                         goto out;
3578                                 }
3579                         }
3580                 }
3581
3582                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3583                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3584                         vnic->mc_list =
3585                                 dma_alloc_coherent(&pdev->dev,
3586                                                    vnic->mc_list_size,
3587                                                    &vnic->mc_list_mapping,
3588                                                    GFP_KERNEL);
3589                         if (!vnic->mc_list) {
3590                                 rc = -ENOMEM;
3591                                 goto out;
3592                         }
3593                 }
3594
3595                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3596                         goto vnic_skip_grps;
3597
3598                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3599                         max_rings = bp->rx_nr_rings;
3600                 else
3601                         max_rings = 1;
3602
3603                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3604                 if (!vnic->fw_grp_ids) {
3605                         rc = -ENOMEM;
3606                         goto out;
3607                 }
3608 vnic_skip_grps:
3609                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3610                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3611                         continue;
3612
3613                 /* Allocate rss table and hash key */
3614                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3615                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3616                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3617
3618                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3619                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3620                                                      vnic->rss_table_size,
3621                                                      &vnic->rss_table_dma_addr,
3622                                                      GFP_KERNEL);
3623                 if (!vnic->rss_table) {
3624                         rc = -ENOMEM;
3625                         goto out;
3626                 }
3627
3628                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3629                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3630         }
3631         return 0;
3632
3633 out:
3634         return rc;
3635 }
3636
3637 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3638 {
3639         struct pci_dev *pdev = bp->pdev;
3640
3641         if (bp->hwrm_cmd_resp_addr) {
3642                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3643                                   bp->hwrm_cmd_resp_dma_addr);
3644                 bp->hwrm_cmd_resp_addr = NULL;
3645         }
3646
3647         if (bp->hwrm_cmd_kong_resp_addr) {
3648                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3649                                   bp->hwrm_cmd_kong_resp_addr,
3650                                   bp->hwrm_cmd_kong_resp_dma_addr);
3651                 bp->hwrm_cmd_kong_resp_addr = NULL;
3652         }
3653 }
3654
3655 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3656 {
3657         struct pci_dev *pdev = bp->pdev;
3658
3659         if (bp->hwrm_cmd_kong_resp_addr)
3660                 return 0;
3661
3662         bp->hwrm_cmd_kong_resp_addr =
3663                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3664                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3665                                    GFP_KERNEL);
3666         if (!bp->hwrm_cmd_kong_resp_addr)
3667                 return -ENOMEM;
3668
3669         return 0;
3670 }
3671
3672 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3673 {
3674         struct pci_dev *pdev = bp->pdev;
3675
3676         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3677                                                    &bp->hwrm_cmd_resp_dma_addr,
3678                                                    GFP_KERNEL);
3679         if (!bp->hwrm_cmd_resp_addr)
3680                 return -ENOMEM;
3681
3682         return 0;
3683 }
3684
3685 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3686 {
3687         if (bp->hwrm_short_cmd_req_addr) {
3688                 struct pci_dev *pdev = bp->pdev;
3689
3690                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3691                                   bp->hwrm_short_cmd_req_addr,
3692                                   bp->hwrm_short_cmd_req_dma_addr);
3693                 bp->hwrm_short_cmd_req_addr = NULL;
3694         }
3695 }
3696
3697 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3698 {
3699         struct pci_dev *pdev = bp->pdev;
3700
3701         if (bp->hwrm_short_cmd_req_addr)
3702                 return 0;
3703
3704         bp->hwrm_short_cmd_req_addr =
3705                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3706                                    &bp->hwrm_short_cmd_req_dma_addr,
3707                                    GFP_KERNEL);
3708         if (!bp->hwrm_short_cmd_req_addr)
3709                 return -ENOMEM;
3710
3711         return 0;
3712 }
3713
3714 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3715 {
3716         kfree(stats->hw_masks);
3717         stats->hw_masks = NULL;
3718         kfree(stats->sw_stats);
3719         stats->sw_stats = NULL;
3720         if (stats->hw_stats) {
3721                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3722                                   stats->hw_stats_map);
3723                 stats->hw_stats = NULL;
3724         }
3725 }
3726
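/* A stats block is made of a DMA-coherent buffer that the hardware
 * writes counters into (hw_stats), a software shadow copy (sw_stats),
 * and optionally an array of per-counter width masks (hw_masks).
 */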
3727 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3728                                 bool alloc_masks)
3729 {
3730         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3731                                              &stats->hw_stats_map, GFP_KERNEL);
3732         if (!stats->hw_stats)
3733                 return -ENOMEM;
3734
3735         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3736         if (!stats->sw_stats)
3737                 goto stats_mem_err;
3738
3739         if (alloc_masks) {
3740                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3741                 if (!stats->hw_masks)
3742                         goto stats_mem_err;
3743         }
3744         return 0;
3745
3746 stats_mem_err:
3747         bnxt_free_stats_mem(bp, stats);
3748         return -ENOMEM;
3749 }
3750
3751 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3752 {
3753         int i;
3754
3755         for (i = 0; i < count; i++)
3756                 mask_arr[i] = mask;
3757 }
3758
3759 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3760 {
3761         int i;
3762
3763         for (i = 0; i < count; i++)
3764                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3765 }
3766
3767 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3768                                     struct bnxt_stats_mem *stats)
3769 {
3770         struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3771         struct hwrm_func_qstats_ext_input req = {0};
3772         __le64 *hw_masks;
3773         int rc;
3774
3775         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3776             !(bp->flags & BNXT_FLAG_CHIP_P5))
3777                 return -EOPNOTSUPP;
3778
3779         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3780         req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3781         mutex_lock(&bp->hwrm_cmd_lock);
3782         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3783         if (rc)
3784                 goto qstat_exit;
3785
3786         hw_masks = &resp->rx_ucast_pkts;
3787         bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3788
3789 qstat_exit:
3790         mutex_unlock(&bp->hwrm_cmd_lock);
3791         return rc;
3792 }
3793
3794 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3795 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3796
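/* Initialize the counter-width masks for ring and port statistics.
 * Masks are queried from firmware where supported; otherwise fixed
 * defaults are used (48-bit ring counters on P5 chips, 40-bit port
 * counters, all-ones elsewhere).
 */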
3797 static void bnxt_init_stats(struct bnxt *bp)
3798 {
3799         struct bnxt_napi *bnapi = bp->bnapi[0];
3800         struct bnxt_cp_ring_info *cpr;
3801         struct bnxt_stats_mem *stats;
3802         __le64 *rx_stats, *tx_stats;
3803         int rc, rx_count, tx_count;
3804         u64 *rx_masks, *tx_masks;
3805         u64 mask;
3806         u8 flags;
3807
3808         cpr = &bnapi->cp_ring;
3809         stats = &cpr->stats;
3810         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3811         if (rc) {
3812                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3813                         mask = (1ULL << 48) - 1;
3814                 else
3815                         mask = -1ULL;
3816                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3817         }
3818         if (bp->flags & BNXT_FLAG_PORT_STATS) {
3819                 stats = &bp->port_stats;
3820                 rx_stats = stats->hw_stats;
3821                 rx_masks = stats->hw_masks;
3822                 rx_count = sizeof(struct rx_port_stats) / 8;
3823                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3824                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3825                 tx_count = sizeof(struct tx_port_stats) / 8;
3826
3827                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3828                 rc = bnxt_hwrm_port_qstats(bp, flags);
3829                 if (rc) {
3830                         mask = (1ULL << 40) - 1;
3831
3832                         bnxt_fill_masks(rx_masks, mask, rx_count);
3833                         bnxt_fill_masks(tx_masks, mask, tx_count);
3834                 } else {
3835                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3836                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3837                         bnxt_hwrm_port_qstats(bp, 0);
3838                 }
3839         }
3840         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3841                 stats = &bp->rx_port_stats_ext;
3842                 rx_stats = stats->hw_stats;
3843                 rx_masks = stats->hw_masks;
3844                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
3845                 stats = &bp->tx_port_stats_ext;
3846                 tx_stats = stats->hw_stats;
3847                 tx_masks = stats->hw_masks;
3848                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
3849
3850                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3851                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
3852                 if (rc) {
3853                         mask = (1ULL << 40) - 1;
3854
3855                         bnxt_fill_masks(rx_masks, mask, rx_count);
3856                         if (tx_stats)
3857                                 bnxt_fill_masks(tx_masks, mask, tx_count);
3858                 } else {
3859                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3860                         if (tx_stats)
3861                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
3862                                                    tx_count);
3863                         bnxt_hwrm_port_qstats_ext(bp, 0);
3864                 }
3865         }
3866 }
3867
3868 static void bnxt_free_port_stats(struct bnxt *bp)
3869 {
3870         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3871         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3872
3873         bnxt_free_stats_mem(bp, &bp->port_stats);
3874         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
3875         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
3876 }
3877
3878 static void bnxt_free_ring_stats(struct bnxt *bp)
3879 {
3880         int i;
3881
3882         if (!bp->bnapi)
3883                 return;
3884
3885         for (i = 0; i < bp->cp_nr_rings; i++) {
3886                 struct bnxt_napi *bnapi = bp->bnapi[i];
3887                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3888
3889                 bnxt_free_stats_mem(bp, &cpr->stats);
3890         }
3891 }
3892
3893 static int bnxt_alloc_stats(struct bnxt *bp)
3894 {
3895         u32 size, i;
3896         int rc;
3897
3898         size = bp->hw_ring_stats_size;
3899
3900         for (i = 0; i < bp->cp_nr_rings; i++) {
3901                 struct bnxt_napi *bnapi = bp->bnapi[i];
3902                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3903
3904                 cpr->stats.len = size;
3905                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
3906                 if (rc)
3907                         return rc;
3908
3909                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3910         }
3911
3912         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3913                 return 0;
3914
3915         if (bp->port_stats.hw_stats)
3916                 goto alloc_ext_stats;
3917
3918         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
3919         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
3920         if (rc)
3921                 return rc;
3922
3923         bp->flags |= BNXT_FLAG_PORT_STATS;
3924
3925 alloc_ext_stats:
3926         /* Display extended statistics only if FW supports them */
3927         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3928                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3929                         return 0;
3930
3931         if (bp->rx_port_stats_ext.hw_stats)
3932                 goto alloc_tx_ext_stats;
3933
3934         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
3935         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
3936         /* Extended stats are optional */
3937         if (rc)
3938                 return 0;
3939
3940 alloc_tx_ext_stats:
3941         if (bp->tx_port_stats_ext.hw_stats)
3942                 return 0;
3943
3944         if (bp->hwrm_spec_code >= 0x10902 ||
3945             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3946                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
3947                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
3948                 /* Extended stats are optional */
3949                 if (rc)
3950                         return 0;
3951         }
3952         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3953         return 0;
3954 }
3955
3956 static void bnxt_clear_ring_indices(struct bnxt *bp)
3957 {
3958         int i;
3959
3960         if (!bp->bnapi)
3961                 return;
3962
3963         for (i = 0; i < bp->cp_nr_rings; i++) {
3964                 struct bnxt_napi *bnapi = bp->bnapi[i];
3965                 struct bnxt_cp_ring_info *cpr;
3966                 struct bnxt_rx_ring_info *rxr;
3967                 struct bnxt_tx_ring_info *txr;
3968
3969                 if (!bnapi)
3970                         continue;
3971
3972                 cpr = &bnapi->cp_ring;
3973                 cpr->cp_raw_cons = 0;
3974
3975                 txr = bnapi->tx_ring;
3976                 if (txr) {
3977                         txr->tx_prod = 0;
3978                         txr->tx_cons = 0;
3979                 }
3980
3981                 rxr = bnapi->rx_ring;
3982                 if (rxr) {
3983                         rxr->rx_prod = 0;
3984                         rxr->rx_agg_prod = 0;
3985                         rxr->rx_sw_agg_prod = 0;
3986                         rxr->rx_next_cons = 0;
3987                 }
3988         }
3989 }
3990
3991 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3992 {
3993 #ifdef CONFIG_RFS_ACCEL
3994         int i;
3995
3996         /* We are under rtnl_lock and all our NAPIs have been disabled,
3997          * so it is safe to delete the hash table.
3998          */
3999         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4000                 struct hlist_head *head;
4001                 struct hlist_node *tmp;
4002                 struct bnxt_ntuple_filter *fltr;
4003
4004                 head = &bp->ntp_fltr_hash_tbl[i];
4005                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4006                         hlist_del(&fltr->hash);
4007                         kfree(fltr);
4008                 }
4009         }
4010         if (irq_reinit) {
4011                 kfree(bp->ntp_fltr_bmap);
4012                 bp->ntp_fltr_bmap = NULL;
4013         }
4014         bp->ntp_fltr_count = 0;
4015 #endif
4016 }
4017
4018 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4019 {
4020 #ifdef CONFIG_RFS_ACCEL
4021         int i, rc = 0;
4022
4023         if (!(bp->flags & BNXT_FLAG_RFS))
4024                 return 0;
4025
4026         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4027                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4028
4029         bp->ntp_fltr_count = 0;
4030         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4031                                     sizeof(long),
4032                                     GFP_KERNEL);
4033
4034         if (!bp->ntp_fltr_bmap)
4035                 rc = -ENOMEM;
4036
4037         return rc;
4038 #else
4039         return 0;
4040 #endif
4041 }
4042
4043 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4044 {
4045         bnxt_free_vnic_attributes(bp);
4046         bnxt_free_tx_rings(bp);
4047         bnxt_free_rx_rings(bp);
4048         bnxt_free_cp_rings(bp);
4049         bnxt_free_ntp_fltrs(bp, irq_re_init);
4050         if (irq_re_init) {
4051                 bnxt_free_ring_stats(bp);
4052                 if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
4053                         bnxt_free_port_stats(bp);
4054                 bnxt_free_ring_grps(bp);
4055                 bnxt_free_vnics(bp);
4056                 kfree(bp->tx_ring_map);
4057                 bp->tx_ring_map = NULL;
4058                 kfree(bp->tx_ring);
4059                 bp->tx_ring = NULL;
4060                 kfree(bp->rx_ring);
4061                 bp->rx_ring = NULL;
4062                 kfree(bp->bnapi);
4063                 bp->bnapi = NULL;
4064         } else {
4065                 bnxt_clear_ring_indices(bp);
4066         }
4067 }
4068
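/* Allocate all driver memory for the current ring configuration.  With
 * irq_re_init, the bnxt_napi pointer array and every bnxt_napi struct
 * are carved out of a single allocation, followed by the RX/TX ring
 * info arrays, statistics, ntuple filter table and VNICs.  The ring
 * descriptor memory itself is (re)allocated in all cases.
 */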
4069 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4070 {
4071         int i, j, rc, size, arr_size;
4072         void *bnapi;
4073
4074         if (irq_re_init) {
4075                 /* Allocate bnapi mem pointer array and mem block for
4076                  * all queues
4077                  */
4078                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4079                                 bp->cp_nr_rings);
4080                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4081                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4082                 if (!bnapi)
4083                         return -ENOMEM;
4084
4085                 bp->bnapi = bnapi;
4086                 bnapi += arr_size;
4087                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4088                         bp->bnapi[i] = bnapi;
4089                         bp->bnapi[i]->index = i;
4090                         bp->bnapi[i]->bp = bp;
4091                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4092                                 struct bnxt_cp_ring_info *cpr =
4093                                         &bp->bnapi[i]->cp_ring;
4094
4095                                 cpr->cp_ring_struct.ring_mem.flags =
4096                                         BNXT_RMEM_RING_PTE_FLAG;
4097                         }
4098                 }
4099
4100                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4101                                       sizeof(struct bnxt_rx_ring_info),
4102                                       GFP_KERNEL);
4103                 if (!bp->rx_ring)
4104                         return -ENOMEM;
4105
4106                 for (i = 0; i < bp->rx_nr_rings; i++) {
4107                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4108
4109                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4110                                 rxr->rx_ring_struct.ring_mem.flags =
4111                                         BNXT_RMEM_RING_PTE_FLAG;
4112                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4113                                         BNXT_RMEM_RING_PTE_FLAG;
4114                         }
4115                         rxr->bnapi = bp->bnapi[i];
4116                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4117                 }
4118
4119                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4120                                       sizeof(struct bnxt_tx_ring_info),
4121                                       GFP_KERNEL);
4122                 if (!bp->tx_ring)
4123                         return -ENOMEM;
4124
4125                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4126                                           GFP_KERNEL);
4127
4128                 if (!bp->tx_ring_map)
4129                         return -ENOMEM;
4130
4131                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4132                         j = 0;
4133                 else
4134                         j = bp->rx_nr_rings;
4135
4136                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4137                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4138
4139                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4140                                 txr->tx_ring_struct.ring_mem.flags =
4141                                         BNXT_RMEM_RING_PTE_FLAG;
4142                         txr->bnapi = bp->bnapi[j];
4143                         bp->bnapi[j]->tx_ring = txr;
4144                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4145                         if (i >= bp->tx_nr_rings_xdp) {
4146                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4147                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4148                         } else {
4149                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4150                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4151                         }
4152                 }
4153
4154                 rc = bnxt_alloc_stats(bp);
4155                 if (rc)
4156                         goto alloc_mem_err;
4157                 bnxt_init_stats(bp);
4158
4159                 rc = bnxt_alloc_ntp_fltrs(bp);
4160                 if (rc)
4161                         goto alloc_mem_err;
4162
4163                 rc = bnxt_alloc_vnics(bp);
4164                 if (rc)
4165                         goto alloc_mem_err;
4166         }
4167
4168         bnxt_init_ring_struct(bp);
4169
4170         rc = bnxt_alloc_rx_rings(bp);
4171         if (rc)
4172                 goto alloc_mem_err;
4173
4174         rc = bnxt_alloc_tx_rings(bp);
4175         if (rc)
4176                 goto alloc_mem_err;
4177
4178         rc = bnxt_alloc_cp_rings(bp);
4179         if (rc)
4180                 goto alloc_mem_err;
4181
4182         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4183                                   BNXT_VNIC_UCAST_FLAG;
4184         rc = bnxt_alloc_vnic_attributes(bp);
4185         if (rc)
4186                 goto alloc_mem_err;
4187         return 0;
4188
4189 alloc_mem_err:
4190         bnxt_free_mem(bp, true);
4191         return rc;
4192 }
4193
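     /* Write the current consumer index to each completion/notification
      * ring doorbell without re-arming interrupts; bnxt_enable_int()
      * below re-arms them.
      */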
4194 static void bnxt_disable_int(struct bnxt *bp)
4195 {
4196         int i;
4197
4198         if (!bp->bnapi)
4199                 return;
4200
4201         for (i = 0; i < bp->cp_nr_rings; i++) {
4202                 struct bnxt_napi *bnapi = bp->bnapi[i];
4203                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4204                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4205
4206                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4207                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4208         }
4209 }
4210
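     /* Return the bp->irq_tbl index (MSI-X vector slot) used by
      * completion ring n.
      */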
4211 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4212 {
4213         struct bnxt_napi *bnapi = bp->bnapi[n];
4214         struct bnxt_cp_ring_info *cpr;
4215
4216         cpr = &bnapi->cp_ring;
4217         return cpr->cp_ring_struct.map_idx;
4218 }
4219
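     /* Disable completion ring interrupts and wait for any interrupt
      * handlers that are already running on the associated vectors to
      * finish before returning.
      */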
4220 static void bnxt_disable_int_sync(struct bnxt *bp)
4221 {
4222         int i;
4223
4224         atomic_inc(&bp->intr_sem);
4225
4226         bnxt_disable_int(bp);
4227         for (i = 0; i < bp->cp_nr_rings; i++) {
4228                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4229
4230                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4231         }
4232 }
4233
4234 static void bnxt_enable_int(struct bnxt *bp)
4235 {
4236         int i;
4237
4238         atomic_set(&bp->intr_sem, 0);
4239         for (i = 0; i < bp->cp_nr_rings; i++) {
4240                 struct bnxt_napi *bnapi = bp->bnapi[i];
4241                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4242
4243                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4244         }
4245 }
4246
4247 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4248                             u16 cmpl_ring, u16 target_id)
4249 {
4250         struct input *req = request;
4251
4252         req->req_type = cpu_to_le16(req_type);
4253         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4254         req->target_id = cpu_to_le16(target_id);
4255         if (bnxt_kong_hwrm_message(bp, req))
4256                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4257         else
4258                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4259 }
4260
4261 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4262 {
4263         switch (hwrm_err) {
4264         case HWRM_ERR_CODE_SUCCESS:
4265                 return 0;
4266         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4267                 return -EACCES;
4268         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4269                 return -ENOSPC;
4270         case HWRM_ERR_CODE_INVALID_PARAMS:
4271         case HWRM_ERR_CODE_INVALID_FLAGS:
4272         case HWRM_ERR_CODE_INVALID_ENABLES:
4273         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4274         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4275                 return -EINVAL;
4276         case HWRM_ERR_CODE_NO_BUFFER:
4277                 return -ENOMEM;
4278         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4279         case HWRM_ERR_CODE_BUSY:
4280                 return -EAGAIN;
4281         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4282                 return -EOPNOTSUPP;
4283         default:
4284                 return -EIO;
4285         }
4286 }
4287
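     /* Send a single HWRM request to firmware and wait for its response.
      * The request is copied into the ChiMP (or KONG) communication area
      * in BAR0, using the short command format and a DMA buffer when
      * required, and the channel doorbell is rung.  Completion is then
      * detected either through the HWRM completion interrupt or by
      * polling the response length and valid byte.  Returns 0 or a
      * negative errno.
      */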
4288 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4289                                  int timeout, bool silent)
4290 {
4291         int i, intr_process, rc, tmo_count;
4292         struct input *req = msg;
4293         u32 *data = msg;
4294         u8 *valid;
4295         u16 cp_ring_id, len = 0;
4296         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4297         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4298         struct hwrm_short_input short_input = {0};
4299         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4300         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4301         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4302
4303         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4304                 return -EBUSY;
4305
4306         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4307                 if (msg_len > bp->hwrm_max_ext_req_len ||
4308                     !bp->hwrm_short_cmd_req_addr)
4309                         return -EINVAL;
4310         }
4311
4312         if (bnxt_hwrm_kong_chnl(bp, req)) {
4313                 dst = BNXT_HWRM_CHNL_KONG;
4314                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4315                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4316                 resp = bp->hwrm_cmd_kong_resp_addr;
4317         }
4318
4319         memset(resp, 0, PAGE_SIZE);
4320         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4321         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4322
4323         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4324         /* currently supports only one outstanding message */
4325         if (intr_process)
4326                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4327
4328         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4329             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4330                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4331                 u16 max_msg_len;
4332
4333                 /* Set the boundary for the maximum extended request
4334                  * length for the short cmd format, using the maximum
4335                  * request length supported by the device.
4336                  */
4337                 max_msg_len = bp->hwrm_max_ext_req_len;
4338
4339                 memcpy(short_cmd_req, req, msg_len);
4340                 if (msg_len < max_msg_len)
4341                         memset(short_cmd_req + msg_len, 0,
4342                                max_msg_len - msg_len);
4343
4344                 short_input.req_type = req->req_type;
4345                 short_input.signature =
4346                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4347                 short_input.size = cpu_to_le16(msg_len);
4348                 short_input.req_addr =
4349                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4350
4351                 data = (u32 *)&short_input;
4352                 msg_len = sizeof(short_input);
4353
4354                 /* Sync memory write before updating doorbell */
4355                 wmb();
4356
4357                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4358         }
4359
4360         /* Write request msg to hwrm channel */
4361         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4362
4363         for (i = msg_len; i < max_req_len; i += 4)
4364                 writel(0, bp->bar0 + bar_offset + i);
4365
4366         /* Ring channel doorbell */
4367         writel(1, bp->bar0 + doorbell_offset);
4368
4369         if (!pci_is_enabled(bp->pdev))
4370                 return 0;
4371
4372         if (!timeout)
4373                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4374         /* convert timeout to usec */
4375         timeout *= 1000;
4376
4377         i = 0;
4378         /* Short timeout for the first few iterations:
4379          * number of loops = number of loops for short timeout +
4380          * number of loops for standard timeout.
4381          */
4382         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4383         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4384         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4385
4386         if (intr_process) {
4387                 u16 seq_id = bp->hwrm_intr_seq_id;
4388
4389                 /* Wait until hwrm response cmpl interrupt is processed */
4390                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4391                        i++ < tmo_count) {
4392                         /* Abort the wait for completion if the FW health
4393                          * check has failed.
4394                          */
4395                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4396                                 return -EBUSY;
4397                         /* on the first few passes, just barely sleep */
4398                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4399                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4400                                              HWRM_SHORT_MAX_TIMEOUT);
4401                         else
4402                                 usleep_range(HWRM_MIN_TIMEOUT,
4403                                              HWRM_MAX_TIMEOUT);
4404                 }
4405
4406                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4407                         if (!silent)
4408                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4409                                            le16_to_cpu(req->req_type));
4410                         return -EBUSY;
4411                 }
4412                 len = le16_to_cpu(resp->resp_len);
4413                 valid = ((u8 *)resp) + len - 1;
4414         } else {
4415                 int j;
4416
4417                 /* Check if response len is updated */
4418                 for (i = 0; i < tmo_count; i++) {
4419                         /* Abort the wait for completion if the FW health
4420                          * check has failed.
4421                          */
4422                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4423                                 return -EBUSY;
4424                         len = le16_to_cpu(resp->resp_len);
4425                         if (len)
4426                                 break;
4427                         /* on the first few passes, just barely sleep */
4428                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4429                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4430                                              HWRM_SHORT_MAX_TIMEOUT);
4431                         else
4432                                 usleep_range(HWRM_MIN_TIMEOUT,
4433                                              HWRM_MAX_TIMEOUT);
4434                 }
4435
4436                 if (i >= tmo_count) {
4437                         if (!silent)
4438                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4439                                            HWRM_TOTAL_TIMEOUT(i),
4440                                            le16_to_cpu(req->req_type),
4441                                            le16_to_cpu(req->seq_id), len);
4442                         return -EBUSY;
4443                 }
4444
4445                 /* Last byte of resp contains valid bit */
4446                 valid = ((u8 *)resp) + len - 1;
4447                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4448                         /* make sure we read from updated DMA memory */
4449                         dma_rmb();
4450                         if (*valid)
4451                                 break;
4452                         usleep_range(1, 5);
4453                 }
4454
4455                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4456                         if (!silent)
4457                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4458                                            HWRM_TOTAL_TIMEOUT(i),
4459                                            le16_to_cpu(req->req_type),
4460                                            le16_to_cpu(req->seq_id), len,
4461                                            *valid);
4462                         return -EBUSY;
4463                 }
4464         }
4465
4466         /* Zero the valid bit for compatibility.  The valid bit in an older
4467          * spec may become a new field in a newer spec.  We must make sure
4468          * that a new field not implemented by the old spec reads as zero.
4469          */
4470         *valid = 0;
4471         rc = le16_to_cpu(resp->error_code);
4472         if (rc && !silent)
4473                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4474                            le16_to_cpu(resp->req_type),
4475                            le16_to_cpu(resp->seq_id), rc);
4476         return bnxt_hwrm_to_stderr(rc);
4477 }
4478
4479 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4480 {
4481         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4482 }
4483
4484 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4485                               int timeout)
4486 {
4487         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4488 }
4489
4490 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4491 {
4492         int rc;
4493
4494         mutex_lock(&bp->hwrm_cmd_lock);
4495         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4496         mutex_unlock(&bp->hwrm_cmd_lock);
4497         return rc;
4498 }
4499
4500 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4501                              int timeout)
4502 {
4503         int rc;
4504
4505         mutex_lock(&bp->hwrm_cmd_lock);
4506         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4507         mutex_unlock(&bp->hwrm_cmd_lock);
4508         return rc;
4509 }
4510
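     /* Register the driver with firmware: advertise the OS type and
      * driver version, the async events that firmware should forward to
      * the driver and, on the PF, the VF HWRM commands that firmware
      * should forward for the PF to service.
      */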
4511 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4512                             bool async_only)
4513 {
4514         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4515         struct hwrm_func_drv_rgtr_input req = {0};
4516         DECLARE_BITMAP(async_events_bmap, 256);
4517         u32 *events = (u32 *)async_events_bmap;
4518         u32 flags;
4519         int rc, i;
4520
4521         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4522
4523         req.enables =
4524                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4525                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4526                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4527
4528         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4529         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4530         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4531                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4532         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4533                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4534                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4535         req.flags = cpu_to_le32(flags);
4536         req.ver_maj_8b = DRV_VER_MAJ;
4537         req.ver_min_8b = DRV_VER_MIN;
4538         req.ver_upd_8b = DRV_VER_UPD;
4539         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4540         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4541         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4542
4543         if (BNXT_PF(bp)) {
4544                 u32 data[8];
4545                 int i;
4546
4547                 memset(data, 0, sizeof(data));
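                     /* Build a 256-bit bitmap of the VF HWRM commands that
                      * firmware should forward: for each command in
                      * bnxt_vf_req_snif, set bit (cmd % 32) of data[cmd / 32],
                      * e.g. command 42 would set bit 10 of data[1].
                      */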
4548                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4549                         u16 cmd = bnxt_vf_req_snif[i];
4550                         unsigned int bit, idx;
4551
4552                         idx = cmd / 32;
4553                         bit = cmd % 32;
4554                         data[idx] |= 1 << bit;
4555                 }
4556
4557                 for (i = 0; i < 8; i++)
4558                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4559
4560                 req.enables |=
4561                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4562         }
4563
4564         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4565                 req.flags |= cpu_to_le32(
4566                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4567
4568         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4569         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4570                 u16 event_id = bnxt_async_events_arr[i];
4571
4572                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4573                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4574                         continue;
4575                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4576         }
4577         if (bmap && bmap_size) {
4578                 for (i = 0; i < bmap_size; i++) {
4579                         if (test_bit(i, bmap))
4580                                 __set_bit(i, async_events_bmap);
4581                 }
4582         }
4583         for (i = 0; i < 8; i++)
4584                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4585
4586         if (async_only)
4587                 req.enables =
4588                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4589
4590         mutex_lock(&bp->hwrm_cmd_lock);
4591         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4592         if (!rc) {
4593                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4594                 if (resp->flags &
4595                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4596                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4597         }
4598         mutex_unlock(&bp->hwrm_cmd_lock);
4599         return rc;
4600 }
4601
4602 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4603 {
4604         struct hwrm_func_drv_unrgtr_input req = {0};
4605
4606         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4607                 return 0;
4608
4609         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4610         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4611 }
4612
4613 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4614 {
4615         int rc;
4616         struct hwrm_tunnel_dst_port_free_input req = {0};
4617
4618         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4619         req.tunnel_type = tunnel_type;
4620
4621         switch (tunnel_type) {
4622         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4623                 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4624                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4625                 break;
4626         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4627                 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4628                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4629                 break;
4630         default:
4631                 break;
4632         }
4633
4634         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4635         if (rc)
4636                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4637                            rc);
4638         return rc;
4639 }
4640
4641 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4642                                            u8 tunnel_type)
4643 {
4644         int rc;
4645         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4646         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4647
4648         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4649
4650         req.tunnel_type = tunnel_type;
4651         req.tunnel_dst_port_val = port;
4652
4653         mutex_lock(&bp->hwrm_cmd_lock);
4654         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4655         if (rc) {
4656                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4657                            rc);
4658                 goto err_out;
4659         }
4660
4661         switch (tunnel_type) {
4662         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4663                 bp->vxlan_fw_dst_port_id =
4664                         le16_to_cpu(resp->tunnel_dst_port_id);
4665                 break;
4666         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4667                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4668                 break;
4669         default:
4670                 break;
4671         }
4672
4673 err_out:
4674         mutex_unlock(&bp->hwrm_cmd_lock);
4675         return rc;
4676 }
4677
4678 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4679 {
4680         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4681         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4682
4683         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4684         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4685
4686         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4687         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4688         req.mask = cpu_to_le32(vnic->rx_mask);
4689         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4690 }
4691
4692 #ifdef CONFIG_RFS_ACCEL
4693 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4694                                             struct bnxt_ntuple_filter *fltr)
4695 {
4696         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4697
4698         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4699         req.ntuple_filter_id = fltr->filter_id;
4700         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4701 }
4702
4703 #define BNXT_NTP_FLTR_FLAGS                                     \
4704         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4705          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4706          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4707          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4708          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4709          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4710          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4711          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4712          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4713          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4714          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4715          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4716          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4717          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4718
4719 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4720                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4721
4722 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4723                                              struct bnxt_ntuple_filter *fltr)
4724 {
4725         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4726         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4727         struct flow_keys *keys = &fltr->fkeys;
4728         struct bnxt_vnic_info *vnic;
4729         u32 flags = 0;
4730         int rc = 0;
4731
4732         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4733         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4734
4735         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4736                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4737                 req.dst_id = cpu_to_le16(fltr->rxq);
4738         } else {
4739                 vnic = &bp->vnic_info[fltr->rxq + 1];
4740                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4741         }
4742         req.flags = cpu_to_le32(flags);
4743         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4744
4745         req.ethertype = htons(ETH_P_IP);
4746         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4747         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4748         req.ip_protocol = keys->basic.ip_proto;
4749
4750         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4751                 int i;
4752
4753                 req.ethertype = htons(ETH_P_IPV6);
4754                 req.ip_addr_type =
4755                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4756                 *(struct in6_addr *)&req.src_ipaddr[0] =
4757                         keys->addrs.v6addrs.src;
4758                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4759                         keys->addrs.v6addrs.dst;
4760                 for (i = 0; i < 4; i++) {
4761                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4762                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4763                 }
4764         } else {
4765                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4766                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4767                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4768                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4769         }
4770         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4771                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4772                 req.tunnel_type =
4773                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4774         }
4775
4776         req.src_port = keys->ports.src;
4777         req.src_port_mask = cpu_to_be16(0xffff);
4778         req.dst_port = keys->ports.dst;
4779         req.dst_port_mask = cpu_to_be16(0xffff);
4780
4781         mutex_lock(&bp->hwrm_cmd_lock);
4782         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4783         if (!rc) {
4784                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4785                 fltr->filter_id = resp->ntuple_filter_id;
4786         }
4787         mutex_unlock(&bp->hwrm_cmd_lock);
4788         return rc;
4789 }
4790 #endif
4791
4792 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4793                                      u8 *mac_addr)
4794 {
4795         int rc;
4796         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4797         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4798
4799         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4800         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4801         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4802                 req.flags |=
4803                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4804         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4805         req.enables =
4806                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4807                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4808                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4809         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4810         req.l2_addr_mask[0] = 0xff;
4811         req.l2_addr_mask[1] = 0xff;
4812         req.l2_addr_mask[2] = 0xff;
4813         req.l2_addr_mask[3] = 0xff;
4814         req.l2_addr_mask[4] = 0xff;
4815         req.l2_addr_mask[5] = 0xff;
4816
4817         mutex_lock(&bp->hwrm_cmd_lock);
4818         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4819         if (!rc)
4820                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4821                                                         resp->l2_filter_id;
4822         mutex_unlock(&bp->hwrm_cmd_lock);
4823         return rc;
4824 }
4825
4826 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4827 {
4828         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4829         int rc = 0;
4830
4831         /* Any associated ntuple filters will also be cleared by firmware. */
4832         mutex_lock(&bp->hwrm_cmd_lock);
4833         for (i = 0; i < num_of_vnics; i++) {
4834                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4835
4836                 for (j = 0; j < vnic->uc_filter_count; j++) {
4837                         struct hwrm_cfa_l2_filter_free_input req = {0};
4838
4839                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4840                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4841
4842                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4843
4844                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4845                                                 HWRM_CMD_TIMEOUT);
4846                 }
4847                 vnic->uc_filter_count = 0;
4848         }
4849         mutex_unlock(&bp->hwrm_cmd_lock);
4850
4851         return rc;
4852 }
4853
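     /* Configure TPA (hardware receive aggregation) for a vnic.  A zero
      * tpa_flags disables aggregation; otherwise the aggregation limits
      * are derived from the MTU and rx page size, or taken from the chip
      * maximums on P5 chips.
      */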
4854 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4855 {
4856         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4857         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4858         struct hwrm_vnic_tpa_cfg_input req = {0};
4859
4860         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4861                 return 0;
4862
4863         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4864
4865         if (tpa_flags) {
4866                 u16 mss = bp->dev->mtu - 40;
4867                 u32 nsegs, n, segs = 0, flags;
4868
4869                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4870                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4871                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4872                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4873                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4874                 if (tpa_flags & BNXT_FLAG_GRO)
4875                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4876
4877                 req.flags = cpu_to_le32(flags);
4878
4879                 req.enables =
4880                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4881                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4882                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4883
4884                 /* The number of segments is in log2 units, and the first
4885                  * packet is not counted as part of these units.
4886                  */
4887                 if (mss <= BNXT_RX_PAGE_SIZE) {
4888                         n = BNXT_RX_PAGE_SIZE / mss;
4889                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4890                 } else {
4891                         n = mss / BNXT_RX_PAGE_SIZE;
4892                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4893                                 n++;
4894                         nsegs = (MAX_SKB_FRAGS - n) / n;
4895                 }
4896
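                     /* Worked example for a non-P5 chip: with a 1500 byte MTU
                      * (mss 1460), a 4 KB BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS
                      * of 17, n = 2 and nsegs = 32, so segs = ilog2(32) = 5.
                      */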
4897                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4898                         segs = MAX_TPA_SEGS_P5;
4899                         max_aggs = bp->max_tpa;
4900                 } else {
4901                         segs = ilog2(nsegs);
4902                 }
4903                 req.max_agg_segs = cpu_to_le16(segs);
4904                 req.max_aggs = cpu_to_le16(max_aggs);
4905
4906                 req.min_agg_len = cpu_to_le32(512);
4907         }
4908         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4909
4910         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4911 }
4912
4913 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4914 {
4915         struct bnxt_ring_grp_info *grp_info;
4916
4917         grp_info = &bp->grp_info[ring->grp_idx];
4918         return grp_info->cp_fw_ring_id;
4919 }
4920
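     /* On P5 chips each rx/tx ring has a dedicated completion ring
      * tracked in the NQ's cp_ring_arr; on older chips the completion
      * ring ID comes from the ring group.
      */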
4921 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4922 {
4923         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4924                 struct bnxt_napi *bnapi = rxr->bnapi;
4925                 struct bnxt_cp_ring_info *cpr;
4926
4927                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4928                 return cpr->cp_ring_struct.fw_ring_id;
4929         } else {
4930                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4931         }
4932 }
4933
4934 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4935 {
4936         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4937                 struct bnxt_napi *bnapi = txr->bnapi;
4938                 struct bnxt_cp_ring_info *cpr;
4939
4940                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4941                 return cpr->cp_ring_struct.fw_ring_id;
4942         } else {
4943                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4944         }
4945 }
4946
4947 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
4948 {
4949         int entries;
4950
4951         if (bp->flags & BNXT_FLAG_CHIP_P5)
4952                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
4953         else
4954                 entries = HW_HASH_INDEX_SIZE;
4955
4956         bp->rss_indir_tbl_entries = entries;
4957         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
4958                                           GFP_KERNEL);
4959         if (!bp->rss_indir_tbl)
4960                 return -ENOMEM;
4961         return 0;
4962 }
4963
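     /* Fill the RSS indirection table with the default round-robin
      * spread across the rx rings and zero any padding entries at the
      * end of the table.
      */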
4964 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
4965 {
4966         u16 max_rings, max_entries, pad, i;
4967
4968         if (!bp->rx_nr_rings)
4969                 return;
4970
4971         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4972                 max_rings = bp->rx_nr_rings - 1;
4973         else
4974                 max_rings = bp->rx_nr_rings;
4975
4976         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
4977
4978         for (i = 0; i < max_entries; i++)
4979                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
4980
4981         pad = bp->rss_indir_tbl_entries - max_entries;
4982         if (pad)
4983                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
4984 }
4985
4986 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
4987 {
4988         u16 i, tbl_size, max_ring = 0;
4989
4990         if (!bp->rss_indir_tbl)
4991                 return 0;
4992
4993         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
4994         for (i = 0; i < tbl_size; i++)
4995                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
4996         return max_ring;
4997 }
4998
4999 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5000 {
5001         if (bp->flags & BNXT_FLAG_CHIP_P5)
5002                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5003         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5004                 return 2;
5005         return 1;
5006 }
5007
5008 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5009 {
5010         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5011         u16 i, j;
5012
5013         /* Fill the RSS indirection table with ring group ids */
5014         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5015                 if (!no_rss)
5016                         j = bp->rss_indir_tbl[i];
5017                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5018         }
5019 }
5020
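     /* On P5 chips each RSS indirection table entry is a pair of ring
      * IDs: the rx ring's firmware ring ID followed by its completion
      * ring ID, rather than a ring group ID.
      */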
5021 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5022                                       struct bnxt_vnic_info *vnic)
5023 {
5024         __le16 *ring_tbl = vnic->rss_table;
5025         struct bnxt_rx_ring_info *rxr;
5026         u16 tbl_size, i;
5027
5028         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5029
5030         for (i = 0; i < tbl_size; i++) {
5031                 u16 ring_id, j;
5032
5033                 j = bp->rss_indir_tbl[i];
5034                 rxr = &bp->rx_ring[j];
5035
5036                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5037                 *ring_tbl++ = cpu_to_le16(ring_id);
5038                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5039                 *ring_tbl++ = cpu_to_le16(ring_id);
5040         }
5041 }
5042
5043 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5044 {
5045         if (bp->flags & BNXT_FLAG_CHIP_P5)
5046                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5047         else
5048                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5049 }
5050
5051 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5052 {
5053         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5054         struct hwrm_vnic_rss_cfg_input req = {0};
5055
5056         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5057             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5058                 return 0;
5059
5060         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5061         if (set_rss) {
5062                 bnxt_fill_hw_rss_tbl(bp, vnic);
5063                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5064                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5065                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5066                 req.hash_key_tbl_addr =
5067                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5068         }
5069         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5070         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5071 }
5072
5073 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5074 {
5075         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5076         struct hwrm_vnic_rss_cfg_input req = {0};
5077         dma_addr_t ring_tbl_map;
5078         u32 i, nr_ctxs;
5079
5080         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5081         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5082         if (!set_rss) {
5083                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5084                 return 0;
5085         }
5086         bnxt_fill_hw_rss_tbl(bp, vnic);
5087         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5088         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5089         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5090         ring_tbl_map = vnic->rss_table_dma_addr;
5091         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5092         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5093                 int rc;
5094
5095                 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5096                 req.ring_table_pair_index = i;
5097                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5098                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5099                 if (rc)
5100                         return rc;
5101         }
5102         return 0;
5103 }
5104
5105 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5106 {
5107         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5108         struct hwrm_vnic_plcmodes_cfg_input req = {0};
5109
5110         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5111         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5112                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5113                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5114         req.enables =
5115                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5116                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5117         /* thresholds not implemented in firmware yet */
5118         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5119         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5120         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5121         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5122 }
5123
5124 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5125                                         u16 ctx_idx)
5126 {
5127         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5128
5129         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5130         req.rss_cos_lb_ctx_id =
5131                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5132
5133         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5134         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5135 }
5136
5137 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5138 {
5139         int i, j;
5140
5141         for (i = 0; i < bp->nr_vnics; i++) {
5142                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5143
5144                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5145                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5146                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5147                 }
5148         }
5149         bp->rsscos_nr_ctxs = 0;
5150 }
5151
5152 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5153 {
5154         int rc;
5155         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5156         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5157                                                 bp->hwrm_cmd_resp_addr;
5158
5159         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5160                                -1);
5161
5162         mutex_lock(&bp->hwrm_cmd_lock);
5163         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5164         if (!rc)
5165                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5166                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5167         mutex_unlock(&bp->hwrm_cmd_lock);
5168
5169         return rc;
5170 }
5171
5172 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5173 {
5174         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5175                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5176         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5177 }
5178
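     /* Configure a vnic in firmware: default ring group (or, on P5
      * chips, default rx and completion ring IDs), RSS and COS rules,
      * MRU and VLAN stripping mode.
      */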
5179 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5180 {
5181         unsigned int ring = 0, grp_idx;
5182         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5183         struct hwrm_vnic_cfg_input req = {0};
5184         u16 def_vlan = 0;
5185
5186         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5187
5188         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5189                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5190
5191                 req.default_rx_ring_id =
5192                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5193                 req.default_cmpl_ring_id =
5194                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5195                 req.enables =
5196                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5197                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5198                 goto vnic_mru;
5199         }
5200         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5201         /* Only RSS is supported for now; TBD: COS & LB */
5202         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5203                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5204                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5205                                            VNIC_CFG_REQ_ENABLES_MRU);
5206         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5207                 req.rss_rule =
5208                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5209                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5210                                            VNIC_CFG_REQ_ENABLES_MRU);
5211                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5212         } else {
5213                 req.rss_rule = cpu_to_le16(0xffff);
5214         }
5215
5216         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5217             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5218                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5219                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5220         } else {
5221                 req.cos_rule = cpu_to_le16(0xffff);
5222         }
5223
5224         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5225                 ring = 0;
5226         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5227                 ring = vnic_id - 1;
5228         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5229                 ring = bp->rx_nr_rings - 1;
5230
5231         grp_idx = bp->rx_ring[ring].bnapi->index;
5232         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5233         req.lb_rule = cpu_to_le16(0xffff);
5234 vnic_mru:
5235         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5236
5237         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5238 #ifdef CONFIG_BNXT_SRIOV
5239         if (BNXT_VF(bp))
5240                 def_vlan = bp->vf.vlan;
5241 #endif
5242         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5243                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5244         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5245                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5246
5247         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5248 }
5249
5250 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5251 {
5252         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5253                 struct hwrm_vnic_free_input req = {0};
5254
5255                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5256                 req.vnic_id =
5257                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5258
5259                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5260                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5261         }
5262 }
5263
5264 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5265 {
5266         u16 i;
5267
5268         for (i = 0; i < bp->nr_vnics; i++)
5269                 bnxt_hwrm_vnic_free_one(bp, i);
5270 }
5271
5272 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5273                                 unsigned int start_rx_ring_idx,
5274                                 unsigned int nr_rings)
5275 {
5276         int rc = 0;
5277         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5278         struct hwrm_vnic_alloc_input req = {0};
5279         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5280         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5281
5282         if (bp->flags & BNXT_FLAG_CHIP_P5)
5283                 goto vnic_no_ring_grps;
5284
5285         /* map ring groups to this vnic */
5286         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5287                 grp_idx = bp->rx_ring[i].bnapi->index;
5288                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5289                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5290                                    j, nr_rings);
5291                         break;
5292                 }
5293                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5294         }
5295
5296 vnic_no_ring_grps:
5297         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5298                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5299         if (vnic_id == 0)
5300                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5301
5302         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5303
5304         mutex_lock(&bp->hwrm_cmd_lock);
5305         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5306         if (!rc)
5307                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5308         mutex_unlock(&bp->hwrm_cmd_lock);
5309         return rc;
5310 }
5311
5312 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5313 {
5314         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5315         struct hwrm_vnic_qcaps_input req = {0};
5316         int rc;
5317
5318         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5319         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5320         if (bp->hwrm_spec_code < 0x10600)
5321                 return 0;
5322
5323         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5324         mutex_lock(&bp->hwrm_cmd_lock);
5325         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5326         if (!rc) {
5327                 u32 flags = le32_to_cpu(resp->flags);
5328
5329                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5330                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5331                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5332                 if (flags &
5333                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5334                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5335
5336                 /* Older P5 fw before EXT_HW_STATS support did not set
5337                  * VLAN_STRIP_CAP properly.
5338                  */
5339                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5340                     ((bp->flags & BNXT_FLAG_CHIP_P5) &&
5341                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5342                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5343                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5344                 if (bp->max_tpa_v2)
5345                         bp->hw_ring_stats_size =
5346                                 sizeof(struct ctx_hw_stats_ext);
5347         }
5348         mutex_unlock(&bp->hwrm_cmd_lock);
5349         return rc;
5350 }
5351
5352 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5353 {
5354         u16 i;
5355         int rc = 0;
5356
5357         if (bp->flags & BNXT_FLAG_CHIP_P5)
5358                 return 0;
5359
5360         mutex_lock(&bp->hwrm_cmd_lock);
5361         for (i = 0; i < bp->rx_nr_rings; i++) {
5362                 struct hwrm_ring_grp_alloc_input req = {0};
5363                 struct hwrm_ring_grp_alloc_output *resp =
5364                                         bp->hwrm_cmd_resp_addr;
5365                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5366
5367                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5368
5369                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5370                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5371                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5372                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5373
5374                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5375                                         HWRM_CMD_TIMEOUT);
5376                 if (rc)
5377                         break;
5378
5379                 bp->grp_info[grp_idx].fw_grp_id =
5380                         le32_to_cpu(resp->ring_group_id);
5381         }
5382         mutex_unlock(&bp->hwrm_cmd_lock);
5383         return rc;
5384 }
5385
5386 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5387 {
5388         u16 i;
5389         struct hwrm_ring_grp_free_input req = {0};
5390
5391         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5392                 return;
5393
5394         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5395
5396         mutex_lock(&bp->hwrm_cmd_lock);
5397         for (i = 0; i < bp->cp_nr_rings; i++) {
5398                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5399                         continue;
5400                 req.ring_group_id =
5401                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5402
5403                 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5404                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5405         }
5406         mutex_unlock(&bp->hwrm_cmd_lock);
5407 }
5408
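     /* Allocate one hardware ring of the given type via HWRM_RING_ALLOC,
      * filling in the type-specific associations (completion ring, stats
      * context, NQ), and save the returned firmware ring ID on success.
      */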
5409 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5410                                     struct bnxt_ring_struct *ring,
5411                                     u32 ring_type, u32 map_index)
5412 {
5413         int rc = 0, err = 0;
5414         struct hwrm_ring_alloc_input req = {0};
5415         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5416         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5417         struct bnxt_ring_grp_info *grp_info;
5418         u16 ring_id;
5419
5420         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5421
5422         req.enables = 0;
5423         if (rmem->nr_pages > 1) {
5424                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5425                 /* Page size is in log2 units */
5426                 req.page_size = BNXT_PAGE_SHIFT;
5427                 req.page_tbl_depth = 1;
5428         } else {
5429                 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5430         }
5431         req.fbo = 0;
5432         /* Association of ring index with doorbell index and MSIX number */
5433         req.logical_id = cpu_to_le16(map_index);
5434
5435         switch (ring_type) {
5436         case HWRM_RING_ALLOC_TX: {
5437                 struct bnxt_tx_ring_info *txr;
5438
5439                 txr = container_of(ring, struct bnxt_tx_ring_info,
5440                                    tx_ring_struct);
5441                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5442                 /* Association of transmit ring with completion ring */
5443                 grp_info = &bp->grp_info[ring->grp_idx];
5444                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5445                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5446                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5447                 req.queue_id = cpu_to_le16(ring->queue_id);
5448                 break;
5449         }
5450         case HWRM_RING_ALLOC_RX:
5451                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5452                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5453                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5454                         u16 flags = 0;
5455
5456                         /* Association of rx ring with stats context */
5457                         grp_info = &bp->grp_info[ring->grp_idx];
5458                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5459                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5460                         req.enables |= cpu_to_le32(
5461                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5462                         if (NET_IP_ALIGN == 2)
5463                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5464                         req.flags = cpu_to_le16(flags);
5465                 }
5466                 break;
5467         case HWRM_RING_ALLOC_AGG:
5468                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5469                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5470                         /* Association of agg ring with rx ring */
5471                         grp_info = &bp->grp_info[ring->grp_idx];
5472                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5473                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5474                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5475                         req.enables |= cpu_to_le32(
5476                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5477                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5478                 } else {
5479                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5480                 }
5481                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5482                 break;
5483         case HWRM_RING_ALLOC_CMPL:
5484                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5485                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5486                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5487                         /* Association of cp ring with nq */
5488                         grp_info = &bp->grp_info[map_index];
5489                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5490                         req.cq_handle = cpu_to_le64(ring->handle);
5491                         req.enables |= cpu_to_le32(
5492                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5493                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5494                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5495                 }
5496                 break;
5497         case HWRM_RING_ALLOC_NQ:
5498                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5499                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5500                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5501                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5502                 break;
5503         default:
5504                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5505                            ring_type);
5506                 return -1;
5507         }
5508
5509         mutex_lock(&bp->hwrm_cmd_lock);
5510         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5511         err = le16_to_cpu(resp->error_code);
5512         ring_id = le16_to_cpu(resp->ring_id);
5513         mutex_unlock(&bp->hwrm_cmd_lock);
5514
5515         if (rc || err) {
5516                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5517                            ring_type, rc, err);
5518                 return -EIO;
5519         }
5520         ring->fw_ring_id = ring_id;
5521         return rc;
5522 }
5523
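/* Tell the firmware which completion ring should receive async event
 * notifications: the PF uses HWRM_FUNC_CFG, a VF uses HWRM_FUNC_VF_CFG.
 */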
5524 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5525 {
5526         int rc;
5527
5528         if (BNXT_PF(bp)) {
5529                 struct hwrm_func_cfg_input req = {0};
5530
5531                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5532                 req.fid = cpu_to_le16(0xffff);
5533                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5534                 req.async_event_cr = cpu_to_le16(idx);
5535                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5536         } else {
5537                 struct hwrm_func_vf_cfg_input req = {0};
5538
5539                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5540                 req.enables =
5541                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5542                 req.async_event_cr = cpu_to_le16(idx);
5543                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5544         }
5545         return rc;
5546 }
5547
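/* Set up the doorbell address and key for a newly allocated ring.  On
 * P5 chips all rings share one doorbell region and the 64-bit key
 * encodes the path, doorbell type and ring XID; on older chips each
 * ring has its own 0x80-byte doorbell window in BAR 1.
 */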
5548 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5549                         u32 map_idx, u32 xid)
5550 {
5551         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5552                 if (BNXT_PF(bp))
5553                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5554                 else
5555                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5556                 switch (ring_type) {
5557                 case HWRM_RING_ALLOC_TX:
5558                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5559                         break;
5560                 case HWRM_RING_ALLOC_RX:
5561                 case HWRM_RING_ALLOC_AGG:
5562                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5563                         break;
5564                 case HWRM_RING_ALLOC_CMPL:
5565                         db->db_key64 = DBR_PATH_L2;
5566                         break;
5567                 case HWRM_RING_ALLOC_NQ:
5568                         db->db_key64 = DBR_PATH_L2;
5569                         break;
5570                 }
5571                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5572         } else {
5573                 db->doorbell = bp->bar1 + map_idx * 0x80;
5574                 switch (ring_type) {
5575                 case HWRM_RING_ALLOC_TX:
5576                         db->db_key32 = DB_KEY_TX;
5577                         break;
5578                 case HWRM_RING_ALLOC_RX:
5579                 case HWRM_RING_ALLOC_AGG:
5580                         db->db_key32 = DB_KEY_RX;
5581                         break;
5582                 case HWRM_RING_ALLOC_CMPL:
5583                         db->db_key32 = DB_KEY_CP;
5584                         break;
5585                 }
5586         }
5587 }
5588
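/* Allocate all firmware rings: NQs (P5) or completion rings first, with
 * the corresponding IRQ masked while each one is set up, then TX, RX
 * and, if aggregation is enabled, RX agg rings.  Doorbells are
 * initialized as each ring is created.
 */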
5589 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5590 {
5591         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5592         int i, rc = 0;
5593         u32 type;
5594
5595         if (bp->flags & BNXT_FLAG_CHIP_P5)
5596                 type = HWRM_RING_ALLOC_NQ;
5597         else
5598                 type = HWRM_RING_ALLOC_CMPL;
5599         for (i = 0; i < bp->cp_nr_rings; i++) {
5600                 struct bnxt_napi *bnapi = bp->bnapi[i];
5601                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5602                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5603                 u32 map_idx = ring->map_idx;
5604                 unsigned int vector;
5605
5606                 vector = bp->irq_tbl[map_idx].vector;
5607                 disable_irq_nosync(vector);
5608                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5609                 if (rc) {
5610                         enable_irq(vector);
5611                         goto err_out;
5612                 }
5613                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5614                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5615                 enable_irq(vector);
5616                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5617
5618                 if (!i) {
5619                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5620                         if (rc)
5621                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5622                 }
5623         }
5624
5625         type = HWRM_RING_ALLOC_TX;
5626         for (i = 0; i < bp->tx_nr_rings; i++) {
5627                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5628                 struct bnxt_ring_struct *ring;
5629                 u32 map_idx;
5630
5631                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5632                         struct bnxt_napi *bnapi = txr->bnapi;
5633                         struct bnxt_cp_ring_info *cpr, *cpr2;
5634                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5635
5636                         cpr = &bnapi->cp_ring;
5637                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5638                         ring = &cpr2->cp_ring_struct;
5639                         ring->handle = BNXT_TX_HDL;
5640                         map_idx = bnapi->index;
5641                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5642                         if (rc)
5643                                 goto err_out;
5644                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5645                                     ring->fw_ring_id);
5646                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5647                 }
5648                 ring = &txr->tx_ring_struct;
5649                 map_idx = i;
5650                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5651                 if (rc)
5652                         goto err_out;
5653                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5654         }
5655
5656         type = HWRM_RING_ALLOC_RX;
5657         for (i = 0; i < bp->rx_nr_rings; i++) {
5658                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5659                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5660                 struct bnxt_napi *bnapi = rxr->bnapi;
5661                 u32 map_idx = bnapi->index;
5662
5663                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5664                 if (rc)
5665                         goto err_out;
5666                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5667                 /* If we have agg rings, post agg buffers first (rx doorbell written later). */
5668                 if (!agg_rings)
5669                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5670                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5671                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5672                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5673                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5674                         struct bnxt_cp_ring_info *cpr2;
5675
5676                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5677                         ring = &cpr2->cp_ring_struct;
5678                         ring->handle = BNXT_RX_HDL;
5679                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5680                         if (rc)
5681                                 goto err_out;
5682                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5683                                     ring->fw_ring_id);
5684                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5685                 }
5686         }
5687
5688         if (agg_rings) {
5689                 type = HWRM_RING_ALLOC_AGG;
5690                 for (i = 0; i < bp->rx_nr_rings; i++) {
5691                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5692                         struct bnxt_ring_struct *ring =
5693                                                 &rxr->rx_agg_ring_struct;
5694                         u32 grp_idx = ring->grp_idx;
5695                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5696
5697                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5698                         if (rc)
5699                                 goto err_out;
5700
5701                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5702                                     ring->fw_ring_id);
5703                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5704                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5705                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5706                 }
5707         }
5708 err_out:
5709         return rc;
5710 }
5711
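/* Free one firmware ring via HWRM_RING_FREE.  Skipped when the firmware
 * is in a fatal error state and can no longer process commands.
 */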
5712 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5713                                    struct bnxt_ring_struct *ring,
5714                                    u32 ring_type, int cmpl_ring_id)
5715 {
5716         int rc;
5717         struct hwrm_ring_free_input req = {0};
5718         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5719         u16 error_code;
5720
5721         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5722                 return 0;
5723
5724         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5725         req.ring_type = ring_type;
5726         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5727
5728         mutex_lock(&bp->hwrm_cmd_lock);
5729         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5730         error_code = le16_to_cpu(resp->error_code);
5731         mutex_unlock(&bp->hwrm_cmd_lock);
5732
5733         if (rc || error_code) {
5734                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5735                            ring_type, rc, error_code);
5736                 return -EIO;
5737         }
5738         return 0;
5739 }
5740
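/* Free all firmware rings: TX, RX and RX agg rings first, then the
 * completion/NQ rings.  Interrupts are disabled before the completion
 * rings are freed because the IRQ doorbells stop working at that point.
 */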
5741 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5742 {
5743         u32 type;
5744         int i;
5745
5746         if (!bp->bnapi)
5747                 return;
5748
5749         for (i = 0; i < bp->tx_nr_rings; i++) {
5750                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5751                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5752
5753                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5754                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5755
5756                         hwrm_ring_free_send_msg(bp, ring,
5757                                                 RING_FREE_REQ_RING_TYPE_TX,
5758                                                 close_path ? cmpl_ring_id :
5759                                                 INVALID_HW_RING_ID);
5760                         ring->fw_ring_id = INVALID_HW_RING_ID;
5761                 }
5762         }
5763
5764         for (i = 0; i < bp->rx_nr_rings; i++) {
5765                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5766                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5767                 u32 grp_idx = rxr->bnapi->index;
5768
5769                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5770                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5771
5772                         hwrm_ring_free_send_msg(bp, ring,
5773                                                 RING_FREE_REQ_RING_TYPE_RX,
5774                                                 close_path ? cmpl_ring_id :
5775                                                 INVALID_HW_RING_ID);
5776                         ring->fw_ring_id = INVALID_HW_RING_ID;
5777                         bp->grp_info[grp_idx].rx_fw_ring_id =
5778                                 INVALID_HW_RING_ID;
5779                 }
5780         }
5781
5782         if (bp->flags & BNXT_FLAG_CHIP_P5)
5783                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5784         else
5785                 type = RING_FREE_REQ_RING_TYPE_RX;
5786         for (i = 0; i < bp->rx_nr_rings; i++) {
5787                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5788                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5789                 u32 grp_idx = rxr->bnapi->index;
5790
5791                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5792                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5793
5794                         hwrm_ring_free_send_msg(bp, ring, type,
5795                                                 close_path ? cmpl_ring_id :
5796                                                 INVALID_HW_RING_ID);
5797                         ring->fw_ring_id = INVALID_HW_RING_ID;
5798                         bp->grp_info[grp_idx].agg_fw_ring_id =
5799                                 INVALID_HW_RING_ID;
5800                 }
5801         }
5802
5803         /* The completion rings are about to be freed.  After that the
5804          * IRQ doorbell will not work anymore.  So we need to disable
5805          * IRQ here.
5806          */
5807         bnxt_disable_int_sync(bp);
5808
5809         if (bp->flags & BNXT_FLAG_CHIP_P5)
5810                 type = RING_FREE_REQ_RING_TYPE_NQ;
5811         else
5812                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5813         for (i = 0; i < bp->cp_nr_rings; i++) {
5814                 struct bnxt_napi *bnapi = bp->bnapi[i];
5815                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5816                 struct bnxt_ring_struct *ring;
5817                 int j;
5818
5819                 for (j = 0; j < 2; j++) {
5820                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5821
5822                         if (cpr2) {
5823                                 ring = &cpr2->cp_ring_struct;
5824                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5825                                         continue;
5826                                 hwrm_ring_free_send_msg(bp, ring,
5827                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5828                                         INVALID_HW_RING_ID);
5829                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5830                         }
5831                 }
5832                 ring = &cpr->cp_ring_struct;
5833                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5834                         hwrm_ring_free_send_msg(bp, ring, type,
5835                                                 INVALID_HW_RING_ID);
5836                         ring->fw_ring_id = INVALID_HW_RING_ID;
5837                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5838                 }
5839         }
5840 }
5841
5842 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5843                            bool shared);
5844
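/* Query the firmware (HWRM_FUNC_QCFG) for the resources currently
 * reserved for this function and cache them in bp->hw_resc.  On P5
 * chips the RX/TX counts may be trimmed so that they fit within the
 * reserved completion rings.
 */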
5845 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5846 {
5847         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5848         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5849         struct hwrm_func_qcfg_input req = {0};
5850         int rc;
5851
5852         if (bp->hwrm_spec_code < 0x10601)
5853                 return 0;
5854
5855         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5856         req.fid = cpu_to_le16(0xffff);
5857         mutex_lock(&bp->hwrm_cmd_lock);
5858         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5859         if (rc) {
5860                 mutex_unlock(&bp->hwrm_cmd_lock);
5861                 return rc;
5862         }
5863
5864         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5865         if (BNXT_NEW_RM(bp)) {
5866                 u16 cp, stats;
5867
5868                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5869                 hw_resc->resv_hw_ring_grps =
5870                         le32_to_cpu(resp->alloc_hw_ring_grps);
5871                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5872                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5873                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5874                 hw_resc->resv_irqs = cp;
5875                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5876                         int rx = hw_resc->resv_rx_rings;
5877                         int tx = hw_resc->resv_tx_rings;
5878
5879                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5880                                 rx >>= 1;
5881                         if (cp < (rx + tx)) {
5882                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5883                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5884                                         rx <<= 1;
5885                                 hw_resc->resv_rx_rings = rx;
5886                                 hw_resc->resv_tx_rings = tx;
5887                         }
5888                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5889                         hw_resc->resv_hw_ring_grps = rx;
5890                 }
5891                 hw_resc->resv_cp_rings = cp;
5892                 hw_resc->resv_stat_ctxs = stats;
5893         }
5894         mutex_unlock(&bp->hwrm_cmd_lock);
5895         return 0;
5896 }
5897
5898 /* Caller must hold bp->hwrm_cmd_lock */
5899 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5900 {
5901         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5902         struct hwrm_func_qcfg_input req = {0};
5903         int rc;
5904
5905         if (bp->hwrm_spec_code < 0x10601)
5906                 return 0;
5907
5908         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5909         req.fid = cpu_to_le16(fid);
5910         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5911         if (!rc)
5912                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5913
5914         return rc;
5915 }
5916
5917 static bool bnxt_rfs_supported(struct bnxt *bp);
5918
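/* Build a HWRM_FUNC_CFG request reserving the given numbers of TX/RX
 * rings, ring groups, completion rings, stat contexts and VNICs for the
 * PF.  Enable bits are only set for non-zero counts, and P5 chips use
 * MSI-X and RSS context fields instead of ring groups.
 */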
5919 static void
5920 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5921                              int tx_rings, int rx_rings, int ring_grps,
5922                              int cp_rings, int stats, int vnics)
5923 {
5924         u32 enables = 0;
5925
5926         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5927         req->fid = cpu_to_le16(0xffff);
5928         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5929         req->num_tx_rings = cpu_to_le16(tx_rings);
5930         if (BNXT_NEW_RM(bp)) {
5931                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5932                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5933                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5934                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5935                         enables |= tx_rings + ring_grps ?
5936                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5937                         enables |= rx_rings ?
5938                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5939                 } else {
5940                         enables |= cp_rings ?
5941                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5942                         enables |= ring_grps ?
5943                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5944                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5945                 }
5946                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5947
5948                 req->num_rx_rings = cpu_to_le16(rx_rings);
5949                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5950                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5951                         req->num_msix = cpu_to_le16(cp_rings);
5952                         req->num_rsscos_ctxs =
5953                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5954                 } else {
5955                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5956                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5957                         req->num_rsscos_ctxs = cpu_to_le16(1);
5958                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5959                             bnxt_rfs_supported(bp))
5960                                 req->num_rsscos_ctxs =
5961                                         cpu_to_le16(ring_grps + 1);
5962                 }
5963                 req->num_stat_ctxs = cpu_to_le16(stats);
5964                 req->num_vnics = cpu_to_le16(vnics);
5965         }
5966         req->enables = cpu_to_le32(enables);
5967 }
5968
5969 static void
5970 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5971                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5972                              int rx_rings, int ring_grps, int cp_rings,
5973                              int stats, int vnics)
5974 {
5975         u32 enables = 0;
5976
5977         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5978         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5979         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5980                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5981         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5982         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5983                 enables |= tx_rings + ring_grps ?
5984                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5985         } else {
5986                 enables |= cp_rings ?
5987                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5988                 enables |= ring_grps ?
5989                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5990         }
5991         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5992         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5993
5994         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5995         req->num_tx_rings = cpu_to_le16(tx_rings);
5996         req->num_rx_rings = cpu_to_le16(rx_rings);
5997         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5998                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5999                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6000         } else {
6001                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6002                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6003                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6004         }
6005         req->num_stat_ctxs = cpu_to_le16(stats);
6006         req->num_vnics = cpu_to_le16(vnics);
6007
6008         req->enables = cpu_to_le32(enables);
6009 }
6010
6011 static int
6012 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6013                            int ring_grps, int cp_rings, int stats, int vnics)
6014 {
6015         struct hwrm_func_cfg_input req = {0};
6016         int rc;
6017
6018         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6019                                      cp_rings, stats, vnics);
6020         if (!req.enables)
6021                 return 0;
6022
6023         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6024         if (rc)
6025                 return rc;
6026
6027         if (bp->hwrm_spec_code < 0x10601)
6028                 bp->hw_resc.resv_tx_rings = tx_rings;
6029
6030         return bnxt_hwrm_get_rings(bp);
6031 }
6032
6033 static int
6034 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6035                            int ring_grps, int cp_rings, int stats, int vnics)
6036 {
6037         struct hwrm_func_vf_cfg_input req = {0};
6038         int rc;
6039
6040         if (!BNXT_NEW_RM(bp)) {
6041                 bp->hw_resc.resv_tx_rings = tx_rings;
6042                 return 0;
6043         }
6044
6045         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6046                                      cp_rings, stats, vnics);
6047         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6048         if (rc)
6049                 return rc;
6050
6051         return bnxt_hwrm_get_rings(bp);
6052 }
6053
6054 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6055                                    int cp, int stat, int vnic)
6056 {
6057         if (BNXT_PF(bp))
6058                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6059                                                   vnic);
6060         else
6061                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6062                                                   vnic);
6063 }
6064
6065 int bnxt_nq_rings_in_use(struct bnxt *bp)
6066 {
6067         int cp = bp->cp_nr_rings;
6068         int ulp_msix, ulp_base;
6069
6070         ulp_msix = bnxt_get_ulp_msix_num(bp);
6071         if (ulp_msix) {
6072                 ulp_base = bnxt_get_ulp_msix_base(bp);
6073                 cp += ulp_msix;
6074                 if ((ulp_base + ulp_msix) > cp)
6075                         cp = ulp_base + ulp_msix;
6076         }
6077         return cp;
6078 }
6079
6080 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6081 {
6082         int cp;
6083
6084         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6085                 return bnxt_nq_rings_in_use(bp);
6086
6087         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6088         return cp;
6089 }
6090
6091 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6092 {
6093         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6094         int cp = bp->cp_nr_rings;
6095
6096         if (!ulp_stat)
6097                 return cp;
6098
6099         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6100                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6101
6102         return cp + ulp_stat;
6103 }
6104
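/* Return true if the rings the driver wants to use no longer match what
 * has been reserved with the firmware, so a new reservation is needed.
 */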
6105 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6106 {
6107         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6108         int cp = bnxt_cp_rings_in_use(bp);
6109         int nq = bnxt_nq_rings_in_use(bp);
6110         int rx = bp->rx_nr_rings, stat;
6111         int vnic = 1, grp = rx;
6112
6113         if (bp->hwrm_spec_code < 0x10601)
6114                 return false;
6115
6116         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
6117                 return true;
6118
6119         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6120                 vnic = rx + 1;
6121         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6122                 rx <<= 1;
6123         stat = bnxt_get_func_stat_ctxs(bp);
6124         if (BNXT_NEW_RM(bp) &&
6125             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6126              hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6127              (hw_resc->resv_hw_ring_grps != grp &&
6128               !(bp->flags & BNXT_FLAG_CHIP_P5))))
6129                 return true;
6130         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6131             hw_resc->resv_irqs != nq)
6132                 return true;
6133         return false;
6134 }
6135
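/* Reserve rings with the firmware and trim the driver's ring counts down
 * to what was actually granted.  Aggregation/LRO is turned off if fewer
 * than two RX rings remain, and a user-configured RSS map is reverted if
 * it no longer fits the reduced RX ring count.
 */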
6136 static int __bnxt_reserve_rings(struct bnxt *bp)
6137 {
6138         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6139         int cp = bnxt_nq_rings_in_use(bp);
6140         int tx = bp->tx_nr_rings;
6141         int rx = bp->rx_nr_rings;
6142         int grp, rx_rings, rc;
6143         int vnic = 1, stat;
6144         bool sh = false;
6145
6146         if (!bnxt_need_reserve_rings(bp))
6147                 return 0;
6148
6149         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6150                 sh = true;
6151         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6152                 vnic = rx + 1;
6153         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6154                 rx <<= 1;
6155         grp = bp->rx_nr_rings;
6156         stat = bnxt_get_func_stat_ctxs(bp);
6157
6158         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6159         if (rc)
6160                 return rc;
6161
6162         tx = hw_resc->resv_tx_rings;
6163         if (BNXT_NEW_RM(bp)) {
6164                 rx = hw_resc->resv_rx_rings;
6165                 cp = hw_resc->resv_irqs;
6166                 grp = hw_resc->resv_hw_ring_grps;
6167                 vnic = hw_resc->resv_vnics;
6168                 stat = hw_resc->resv_stat_ctxs;
6169         }
6170
6171         rx_rings = rx;
6172         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6173                 if (rx >= 2) {
6174                         rx_rings = rx >> 1;
6175                 } else {
6176                         if (netif_running(bp->dev))
6177                                 return -ENOMEM;
6178
6179                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6180                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6181                         bp->dev->hw_features &= ~NETIF_F_LRO;
6182                         bp->dev->features &= ~NETIF_F_LRO;
6183                         bnxt_set_ring_params(bp);
6184                 }
6185         }
6186         rx_rings = min_t(int, rx_rings, grp);
6187         cp = min_t(int, cp, bp->cp_nr_rings);
6188         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6189                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6190         cp = min_t(int, cp, stat);
6191         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6192         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6193                 rx = rx_rings << 1;
6194         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6195         bp->tx_nr_rings = tx;
6196
6197         /* If we cannot reserve all the RX rings, reset the RSS map only
6198          * if absolutely necessary.
6199          */
6200         if (rx_rings != bp->rx_nr_rings) {
6201                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6202                             rx_rings, bp->rx_nr_rings);
6203                 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6204                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6205                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6206                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6207                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6208                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6209                 }
6210         }
6211         bp->rx_nr_rings = rx_rings;
6212         bp->cp_nr_rings = cp;
6213
6214         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6215                 return -ENOMEM;
6216
6217         return rc;
6218 }
6219
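/* Ask the firmware to test, without actually reserving, whether the
 * requested VF resources are available, using the *_ASSETS_TEST flags.
 */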
6220 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6221                                     int ring_grps, int cp_rings, int stats,
6222                                     int vnics)
6223 {
6224         struct hwrm_func_vf_cfg_input req = {0};
6225         u32 flags;
6226
6227         if (!BNXT_NEW_RM(bp))
6228                 return 0;
6229
6230         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6231                                      cp_rings, stats, vnics);
6232         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6233                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6234                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6235                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6236                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6237                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6238         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6239                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6240
6241         req.flags = cpu_to_le32(flags);
6242         return hwrm_send_message_silent(bp, &req, sizeof(req),
6243                                         HWRM_CMD_TIMEOUT);
6244 }
6245
6246 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6247                                     int ring_grps, int cp_rings, int stats,
6248                                     int vnics)
6249 {
6250         struct hwrm_func_cfg_input req = {0};
6251         u32 flags;
6252
6253         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6254                                      cp_rings, stats, vnics);
6255         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6256         if (BNXT_NEW_RM(bp)) {
6257                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6258                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6259                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6260                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6261                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6262                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6263                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6264                 else
6265                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6266         }
6267
6268         req.flags = cpu_to_le32(flags);
6269         return hwrm_send_message_silent(bp, &req, sizeof(req),
6270                                         HWRM_CMD_TIMEOUT);
6271 }
6272
6273 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6274                                  int ring_grps, int cp_rings, int stats,
6275                                  int vnics)
6276 {
6277         if (bp->hwrm_spec_code < 0x10801)
6278                 return 0;
6279
6280         if (BNXT_PF(bp))
6281                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6282                                                 ring_grps, cp_rings, stats,
6283                                                 vnics);
6284
6285         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6286                                         cp_rings, stats, vnics);
6287 }
6288
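/* Query the firmware's interrupt coalescing capabilities.  Firmware older
 * than spec 0x10902 does not support the query, so legacy defaults are
 * kept instead.
 */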
6289 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6290 {
6291         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6292         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6293         struct hwrm_ring_aggint_qcaps_input req = {0};
6294         int rc;
6295
6296         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6297         coal_cap->num_cmpl_dma_aggr_max = 63;
6298         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6299         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6300         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6301         coal_cap->int_lat_tmr_min_max = 65535;
6302         coal_cap->int_lat_tmr_max_max = 65535;
6303         coal_cap->num_cmpl_aggr_int_max = 65535;
6304         coal_cap->timer_units = 80;
6305
6306         if (bp->hwrm_spec_code < 0x10902)
6307                 return;
6308
6309         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6310         mutex_lock(&bp->hwrm_cmd_lock);
6311         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6312         if (!rc) {
6313                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6314                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6315                 coal_cap->num_cmpl_dma_aggr_max =
6316                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6317                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6318                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6319                 coal_cap->cmpl_aggr_dma_tmr_max =
6320                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6321                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6322                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6323                 coal_cap->int_lat_tmr_min_max =
6324                         le16_to_cpu(resp->int_lat_tmr_min_max);
6325                 coal_cap->int_lat_tmr_max_max =
6326                         le16_to_cpu(resp->int_lat_tmr_max_max);
6327                 coal_cap->num_cmpl_aggr_int_max =
6328                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6329                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6330         }
6331         mutex_unlock(&bp->hwrm_cmd_lock);
6332 }
6333
6334 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6335 {
6336         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6337
6338         return usec * 1000 / coal_cap->timer_units;
6339 }
6340
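/* Translate the driver's coalescing settings into an aggint params
 * request, clamping each value to the limits reported by the firmware.
 * The min timer is set to half the interrupt timer and the buffer DMA
 * timer to a quarter of it.
 */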
6341 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6342         struct bnxt_coal *hw_coal,
6343         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6344 {
6345         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6346         u32 cmpl_params = coal_cap->cmpl_params;
6347         u16 val, tmr, max, flags = 0;
6348
6349         max = hw_coal->bufs_per_record * 128;
6350         if (hw_coal->budget)
6351                 max = hw_coal->bufs_per_record * hw_coal->budget;
6352         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6353
6354         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6355         req->num_cmpl_aggr_int = cpu_to_le16(val);
6356
6357         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6358         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6359
6360         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6361                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6362         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6363
6364         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6365         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6366         req->int_lat_tmr_max = cpu_to_le16(tmr);
6367
6368         /* min timer set to 1/2 of interrupt timer */
6369         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6370                 val = tmr / 2;
6371                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6372                 req->int_lat_tmr_min = cpu_to_le16(val);
6373                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6374         }
6375
6376         /* buf timer set to 1/4 of interrupt timer */
6377         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6378         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6379
6380         if (cmpl_params &
6381             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6382                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6383                 val = clamp_t(u16, tmr, 1,
6384                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6385                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6386                 req->enables |=
6387                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6388         }
6389
6390         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6391                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6392         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6393             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6394                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6395         req->flags = cpu_to_le16(flags);
6396         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6397 }
6398
6399 /* Caller holds bp->hwrm_cmd_lock */
6400 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6401                                    struct bnxt_coal *hw_coal)
6402 {
6403         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6404         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6405         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6406         u32 nq_params = coal_cap->nq_params;
6407         u16 tmr;
6408
6409         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6410                 return 0;
6411
6412         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6413                                -1, -1);
6414         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6415         req.flags =
6416                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6417
6418         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6419         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6420         req.int_lat_tmr_min = cpu_to_le16(tmr);
6421         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6422         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6423 }
6424
6425 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6426 {
6427         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6428         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6429         struct bnxt_coal coal;
6430
6431         /* Tick values in microseconds.
6432          * 1 coal_buf x bufs_per_record = 1 completion record.
6433          */
6434         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6435
6436         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6437         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6438
6439         if (!bnapi->rx_ring)
6440                 return -ENODEV;
6441
6442         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6443                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6444
6445         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6446
6447         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6448
6449         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6450                                  HWRM_CMD_TIMEOUT);
6451 }
6452
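/* Program interrupt coalescing on every completion ring.  On P5 chips a
 * NAPI instance can have separate RX and TX completion rings plus an NQ,
 * each of which is configured individually.
 */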
6453 int bnxt_hwrm_set_coal(struct bnxt *bp)
6454 {
6455         int i, rc = 0;
6456         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6457                                                            req_tx = {0}, *req;
6458
6459         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6460                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6461         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6462                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6463
6464         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6465         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6466
6467         mutex_lock(&bp->hwrm_cmd_lock);
6468         for (i = 0; i < bp->cp_nr_rings; i++) {
6469                 struct bnxt_napi *bnapi = bp->bnapi[i];
6470                 struct bnxt_coal *hw_coal;
6471                 u16 ring_id;
6472
6473                 req = &req_rx;
6474                 if (!bnapi->rx_ring) {
6475                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6476                         req = &req_tx;
6477                 } else {
6478                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6479                 }
6480                 req->ring_id = cpu_to_le16(ring_id);
6481
6482                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6483                                         HWRM_CMD_TIMEOUT);
6484                 if (rc)
6485                         break;
6486
6487                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6488                         continue;
6489
6490                 if (bnapi->rx_ring && bnapi->tx_ring) {
6491                         req = &req_tx;
6492                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6493                         req->ring_id = cpu_to_le16(ring_id);
6494                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6495                                                 HWRM_CMD_TIMEOUT);
6496                         if (rc)
6497                                 break;
6498                 }
6499                 if (bnapi->rx_ring)
6500                         hw_coal = &bp->rx_coal;
6501                 else
6502                         hw_coal = &bp->tx_coal;
6503                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6504         }
6505         mutex_unlock(&bp->hwrm_cmd_lock);
6506         return rc;
6507 }
6508
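/* Free all statistics contexts.  For firmware with major version <= 20,
 * the counters are cleared (HWRM_STAT_CTX_CLR_STATS) before each context
 * is freed.
 */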
6509 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6510 {
6511         struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6512         struct hwrm_stat_ctx_free_input req = {0};
6513         int i;
6514
6515         if (!bp->bnapi)
6516                 return;
6517
6518         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6519                 return;
6520
6521         bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6522         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6523
6524         mutex_lock(&bp->hwrm_cmd_lock);
6525         for (i = 0; i < bp->cp_nr_rings; i++) {
6526                 struct bnxt_napi *bnapi = bp->bnapi[i];
6527                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6528
6529                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6530                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6531                         if (BNXT_FW_MAJ(bp) <= 20) {
6532                                 req0.stat_ctx_id = req.stat_ctx_id;
6533                                 _hwrm_send_message(bp, &req0, sizeof(req0),
6534                                                    HWRM_CMD_TIMEOUT);
6535                         }
6536                         _hwrm_send_message(bp, &req, sizeof(req),
6537                                            HWRM_CMD_TIMEOUT);
6538
6539                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6540                 }
6541         }
6542         mutex_unlock(&bp->hwrm_cmd_lock);
6543 }
6544
6545 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6546 {
6547         int rc = 0, i;
6548         struct hwrm_stat_ctx_alloc_input req = {0};
6549         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6550
6551         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6552                 return 0;
6553
6554         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6555
6556         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6557         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6558
6559         mutex_lock(&bp->hwrm_cmd_lock);
6560         for (i = 0; i < bp->cp_nr_rings; i++) {
6561                 struct bnxt_napi *bnapi = bp->bnapi[i];
6562                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6563
6564                 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6565
6566                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6567                                         HWRM_CMD_TIMEOUT);
6568                 if (rc)
6569                         break;
6570
6571                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6572
6573                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6574         }
6575         mutex_unlock(&bp->hwrm_cmd_lock);
6576         return rc;
6577 }
6578
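/* Query the function's current configuration: VF VLAN or PF registered
 * VF count, LLDP/DCBX agent capabilities, NPAR partition type, bridge
 * mode, maximum configurable MTU and the usable doorbell BAR size.
 */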
6579 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6580 {
6581         struct hwrm_func_qcfg_input req = {0};
6582         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6583         u32 min_db_offset = 0;
6584         u16 flags;
6585         int rc;
6586
6587         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6588         req.fid = cpu_to_le16(0xffff);
6589         mutex_lock(&bp->hwrm_cmd_lock);
6590         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6591         if (rc)
6592                 goto func_qcfg_exit;
6593
6594 #ifdef CONFIG_BNXT_SRIOV
6595         if (BNXT_VF(bp)) {
6596                 struct bnxt_vf_info *vf = &bp->vf;
6597
6598                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6599         } else {
6600                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6601         }
6602 #endif
6603         flags = le16_to_cpu(resp->flags);
6604         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6605                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6606                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6607                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6608                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6609         }
6610         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6611                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6612
6613         switch (resp->port_partition_type) {
6614         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6615         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6616         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6617                 bp->port_partition_type = resp->port_partition_type;
6618                 break;
6619         }
6620         if (bp->hwrm_spec_code < 0x10707 ||
6621             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6622                 bp->br_mode = BRIDGE_MODE_VEB;
6623         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6624                 bp->br_mode = BRIDGE_MODE_VEPA;
6625         else
6626                 bp->br_mode = BRIDGE_MODE_UNDEF;
6627
6628         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6629         if (!bp->max_mtu)
6630                 bp->max_mtu = BNXT_MAX_MTU;
6631
6632         if (bp->db_size)
6633                 goto func_qcfg_exit;
6634
6635         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6636                 if (BNXT_PF(bp))
6637                         min_db_offset = DB_PF_OFFSET_P5;
6638                 else
6639                         min_db_offset = DB_VF_OFFSET_P5;
6640         }
6641         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6642                                  1024);
6643         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6644             bp->db_size <= min_db_offset)
6645                 bp->db_size = pci_resource_len(bp->pdev, 2);
6646
6647 func_qcfg_exit:
6648         mutex_unlock(&bp->hwrm_cmd_lock);
6649         return rc;
6650 }
6651
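/* Query how much context (backing store) memory the firmware wants the
 * host to provide: entry sizes and limits for QP, SRQ, CQ, VNIC, stats,
 * TQM, MRAV and TIM contexts.  The results are cached in bp->ctx.
 */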
6652 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6653 {
6654         struct hwrm_func_backing_store_qcaps_input req = {0};
6655         struct hwrm_func_backing_store_qcaps_output *resp =
6656                 bp->hwrm_cmd_resp_addr;
6657         int rc;
6658
6659         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6660                 return 0;
6661
6662         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6663         mutex_lock(&bp->hwrm_cmd_lock);
6664         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6665         if (!rc) {
6666                 struct bnxt_ctx_pg_info *ctx_pg;
6667                 struct bnxt_ctx_mem_info *ctx;
6668                 int i, tqm_rings;
6669
6670                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6671                 if (!ctx) {
6672                         rc = -ENOMEM;
6673                         goto ctx_err;
6674                 }
6675                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6676                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6677                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6678                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6679                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6680                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6681                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6682                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6683                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6684                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6685                 ctx->vnic_max_vnic_entries =
6686                         le16_to_cpu(resp->vnic_max_vnic_entries);
6687                 ctx->vnic_max_ring_table_entries =
6688                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6689                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6690                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6691                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6692                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6693                 ctx->tqm_min_entries_per_ring =
6694                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6695                 ctx->tqm_max_entries_per_ring =
6696                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6697                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6698                 if (!ctx->tqm_entries_multiple)
6699                         ctx->tqm_entries_multiple = 1;
6700                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6701                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6702                 ctx->mrav_num_entries_units =
6703                         le16_to_cpu(resp->mrav_num_entries_units);
6704                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6705                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6706                 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6707                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6708                 if (!ctx->tqm_fp_rings_count)
6709                         ctx->tqm_fp_rings_count = bp->max_q;
6710
6711                 tqm_rings = ctx->tqm_fp_rings_count + 1;
6712                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6713                 if (!ctx_pg) {
6714                         kfree(ctx);
6715                         rc = -ENOMEM;
6716                         goto ctx_err;
6717                 }
6718                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6719                         ctx->tqm_mem[i] = ctx_pg;
6720                 bp->ctx = ctx;
6721         } else {
6722                 rc = 0;
6723         }
6724 ctx_err:
6725         mutex_unlock(&bp->hwrm_cmd_lock);
6726         return rc;
6727 }
6728
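/* Encode the backing store page size (4K, 8K or 64K) and indirection
 * depth into a page attribute byte, and point the page directory at
 * either the page table or the single data page.
 */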
6729 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6730                                   __le64 *pg_dir)
6731 {
6732         u8 pg_size = 0;
6733
6734         if (BNXT_PAGE_SHIFT == 13)
6735                 pg_size = 1 << 4;
6736                 else if (BNXT_PAGE_SHIFT == 16)
6737                 pg_size = 2 << 4;
6738
6739         *pg_attr = pg_size;
6740         if (rmem->depth >= 1) {
6741                 if (rmem->depth == 2)
6742                         *pg_attr |= 2;
6743                 else
6744                         *pg_attr |= 1;
6745                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6746         } else {
6747                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6748         }
6749 }
6750
6751 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6752         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6753          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6754          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6755          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6756          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6757
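     /* Fill and send FUNC_BACKING_STORE_CFG for each context type selected in
      * "enables": entry counts, entry sizes and page directories for QP, SRQ,
      * CQ, VNIC, STAT, MRAV and TIM, plus the TQM rings.  The TQM loop steps
      * num_entries/pg_attr/pg_dir through the request with pointer arithmetic,
      * which assumes the per-ring tqm_* fields are laid out contiguously in
      * struct hwrm_func_backing_store_cfg_input.
      */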
6758 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6759 {
6760         struct hwrm_func_backing_store_cfg_input req = {0};
6761         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6762         struct bnxt_ctx_pg_info *ctx_pg;
6763         __le32 *num_entries;
6764         __le64 *pg_dir;
6765         u32 flags = 0;
6766         u8 *pg_attr;
6767         u32 ena;
6768         int i;
6769
6770         if (!ctx)
6771                 return 0;
6772
6773         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6774         req.enables = cpu_to_le32(enables);
6775
6776         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6777                 ctx_pg = &ctx->qp_mem;
6778                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6779                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6780                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6781                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6782                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6783                                       &req.qpc_pg_size_qpc_lvl,
6784                                       &req.qpc_page_dir);
6785         }
6786         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6787                 ctx_pg = &ctx->srq_mem;
6788                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6789                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6790                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6791                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6792                                       &req.srq_pg_size_srq_lvl,
6793                                       &req.srq_page_dir);
6794         }
6795         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6796                 ctx_pg = &ctx->cq_mem;
6797                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6798                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6799                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6800                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6801                                       &req.cq_page_dir);
6802         }
6803         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6804                 ctx_pg = &ctx->vnic_mem;
6805                 req.vnic_num_vnic_entries =
6806                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6807                 req.vnic_num_ring_table_entries =
6808                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6809                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6810                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6811                                       &req.vnic_pg_size_vnic_lvl,
6812                                       &req.vnic_page_dir);
6813         }
6814         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6815                 ctx_pg = &ctx->stat_mem;
6816                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6817                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6818                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6819                                       &req.stat_pg_size_stat_lvl,
6820                                       &req.stat_page_dir);
6821         }
6822         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6823                 ctx_pg = &ctx->mrav_mem;
6824                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6825                 if (ctx->mrav_num_entries_units)
6826                         flags |=
6827                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6828                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6829                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6830                                       &req.mrav_pg_size_mrav_lvl,
6831                                       &req.mrav_page_dir);
6832         }
6833         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6834                 ctx_pg = &ctx->tim_mem;
6835                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6836                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6837                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6838                                       &req.tim_pg_size_tim_lvl,
6839                                       &req.tim_page_dir);
6840         }
6841         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6842              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6843              pg_dir = &req.tqm_sp_page_dir,
6844              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6845              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6846                 if (!(enables & ena))
6847                         continue;
6848
6849                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6850                 ctx_pg = ctx->tqm_mem[i];
6851                 *num_entries = cpu_to_le32(ctx_pg->entries);
6852                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6853         }
6854         req.flags = cpu_to_le32(flags);
6855         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6856 }
6857
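     /* Allocate the DMA pages for one context-memory block by reusing the
      * generic ring allocator: the page and DMA address arrays embedded in
      * ctx_pg are handed to bnxt_alloc_ring() through the ring_mem descriptor.
      */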
6858 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6859                                   struct bnxt_ctx_pg_info *ctx_pg)
6860 {
6861         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6862
6863         rmem->page_size = BNXT_PAGE_SIZE;
6864         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6865         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6866         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6867         if (rmem->depth >= 1)
6868                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6869         return bnxt_alloc_ring(bp, rmem);
6870 }
6871
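     /* Size and allocate a context region of mem_size bytes.  Small regions
      * (at most MAX_CTX_PAGES pages with depth <= 1) use no more than a
      * one-level page table.  Larger regions, or callers requesting depth > 1,
      * get a two-level layout: a root table whose entries each point at a
      * page-table block covering MAX_CTX_PAGES pages, with the last block
      * trimmed to the remainder.  When use_init_val is set, the firmware-
      * supplied ctx_kind_initializer is used as the page init value.
      */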
6872 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6873                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6874                                   u8 depth, bool use_init_val)
6875 {
6876         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6877         int rc;
6878
6879         if (!mem_size)
6880                 return -EINVAL;
6881
6882         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6883         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6884                 ctx_pg->nr_pages = 0;
6885                 return -EINVAL;
6886         }
6887         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6888                 int nr_tbls, i;
6889
6890                 rmem->depth = 2;
6891                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6892                                              GFP_KERNEL);
6893                 if (!ctx_pg->ctx_pg_tbl)
6894                         return -ENOMEM;
6895                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6896                 rmem->nr_pages = nr_tbls;
6897                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6898                 if (rc)
6899                         return rc;
6900                 for (i = 0; i < nr_tbls; i++) {
6901                         struct bnxt_ctx_pg_info *pg_tbl;
6902
6903                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6904                         if (!pg_tbl)
6905                                 return -ENOMEM;
6906                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6907                         rmem = &pg_tbl->ring_mem;
6908                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6909                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6910                         rmem->depth = 1;
6911                         rmem->nr_pages = MAX_CTX_PAGES;
6912                         if (use_init_val)
6913                                 rmem->init_val = bp->ctx->ctx_kind_initializer;
6914                         if (i == (nr_tbls - 1)) {
6915                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6916
6917                                 if (rem)
6918                                         rmem->nr_pages = rem;
6919                         }
6920                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6921                         if (rc)
6922                                 break;
6923                 }
6924         } else {
6925                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6926                 if (rmem->nr_pages > 1 || depth)
6927                         rmem->depth = 1;
6928                 if (use_init_val)
6929                         rmem->init_val = bp->ctx->ctx_kind_initializer;
6930                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6931         }
6932         return rc;
6933 }
6934
6935 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6936                                   struct bnxt_ctx_pg_info *ctx_pg)
6937 {
6938         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6939
6940         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6941             ctx_pg->ctx_pg_tbl) {
6942                 int i, nr_tbls = rmem->nr_pages;
6943
6944                 for (i = 0; i < nr_tbls; i++) {
6945                         struct bnxt_ctx_pg_info *pg_tbl;
6946                         struct bnxt_ring_mem_info *rmem2;
6947
6948                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6949                         if (!pg_tbl)
6950                                 continue;
6951                         rmem2 = &pg_tbl->ring_mem;
6952                         bnxt_free_ring(bp, rmem2);
6953                         ctx_pg->ctx_pg_arr[i] = NULL;
6954                         kfree(pg_tbl);
6955                         ctx_pg->ctx_pg_tbl[i] = NULL;
6956                 }
6957                 kfree(ctx_pg->ctx_pg_tbl);
6958                 ctx_pg->ctx_pg_tbl = NULL;
6959         }
6960         bnxt_free_ring(bp, rmem);
6961         ctx_pg->nr_pages = 0;
6962 }
6963
6964 static void bnxt_free_ctx_mem(struct bnxt *bp)
6965 {
6966         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6967         int i;
6968
6969         if (!ctx)
6970                 return;
6971
6972         if (ctx->tqm_mem[0]) {
6973                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
6974                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6975                 kfree(ctx->tqm_mem[0]);
6976                 ctx->tqm_mem[0] = NULL;
6977         }
6978
6979         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6980         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6981         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6982         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6983         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6984         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6985         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6986         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6987 }
6988
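     /* Query backing-store capabilities and carve out host memory for each
      * context type.  When RoCE is supported (and this is not a kdump kernel),
      * extra QP/SRQ entries are budgeted and two-level page tables are used so
      * the larger regions fit.  The MRAV and TIM regions are only set up for
      * RoCE, and every TQM ring (slow path plus the fast-path rings) gets its
      * own region.  The final layout is pushed to firmware via
      * bnxt_hwrm_func_backing_store_cfg().
      */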
6989 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6990 {
6991         struct bnxt_ctx_pg_info *ctx_pg;
6992         struct bnxt_ctx_mem_info *ctx;
6993         u32 mem_size, ena, entries;
6994         u32 entries_sp, min;
6995         u32 num_mr, num_ah;
6996         u32 extra_srqs = 0;
6997         u32 extra_qps = 0;
6998         u8 pg_lvl = 1;
6999         int i, rc;
7000
7001         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7002         if (rc) {
7003                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7004                            rc);
7005                 return rc;
7006         }
7007         ctx = bp->ctx;
7008         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7009                 return 0;
7010
7011         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7012                 pg_lvl = 2;
7013                 extra_qps = 65536;
7014                 extra_srqs = 8192;
7015         }
7016
7017         ctx_pg = &ctx->qp_mem;
7018         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7019                           extra_qps;
7020         mem_size = ctx->qp_entry_size * ctx_pg->entries;
7021         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7022         if (rc)
7023                 return rc;
7024
7025         ctx_pg = &ctx->srq_mem;
7026         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7027         mem_size = ctx->srq_entry_size * ctx_pg->entries;
7028         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7029         if (rc)
7030                 return rc;
7031
7032         ctx_pg = &ctx->cq_mem;
7033         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7034         mem_size = ctx->cq_entry_size * ctx_pg->entries;
7035         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7036         if (rc)
7037                 return rc;
7038
7039         ctx_pg = &ctx->vnic_mem;
7040         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7041                           ctx->vnic_max_ring_table_entries;
7042         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7043         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7044         if (rc)
7045                 return rc;
7046
7047         ctx_pg = &ctx->stat_mem;
7048         ctx_pg->entries = ctx->stat_max_entries;
7049         mem_size = ctx->stat_entry_size * ctx_pg->entries;
7050         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7051         if (rc)
7052                 return rc;
7053
7054         ena = 0;
7055         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7056                 goto skip_rdma;
7057
7058         ctx_pg = &ctx->mrav_mem;
7059         /* 128K extra is needed to accommodate static AH context
7060          * allocation by f/w.
7061          */
7062         num_mr = 1024 * 256;
7063         num_ah = 1024 * 128;
7064         ctx_pg->entries = num_mr + num_ah;
7065         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7066         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
7067         if (rc)
7068                 return rc;
7069         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7070         if (ctx->mrav_num_entries_units)
7071                 ctx_pg->entries =
7072                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7073                          (num_ah / ctx->mrav_num_entries_units);
7074
7075         ctx_pg = &ctx->tim_mem;
7076         ctx_pg->entries = ctx->qp_mem.entries;
7077         mem_size = ctx->tim_entry_size * ctx_pg->entries;
7078         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7079         if (rc)
7080                 return rc;
7081         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7082
7083 skip_rdma:
7084         min = ctx->tqm_min_entries_per_ring;
7085         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7086                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7087         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7088         entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
7089         entries = roundup(entries, ctx->tqm_entries_multiple);
7090         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7091         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7092                 ctx_pg = ctx->tqm_mem[i];
7093                 ctx_pg->entries = i ? entries : entries_sp;
7094                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7095                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7096                 if (rc)
7097                         return rc;
7098                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7099         }
7100         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7101         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7102         if (rc) {
7103                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7104                            rc);
7105                 return rc;
7106         }
7107         ctx->flags |= BNXT_CTX_FLAG_INITED;
7108         return 0;
7109 }
7110
7111 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7112 {
7113         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7114         struct hwrm_func_resource_qcaps_input req = {0};
7115         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7116         int rc;
7117
7118         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7119         req.fid = cpu_to_le16(0xffff);
7120
7121         mutex_lock(&bp->hwrm_cmd_lock);
7122         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7123                                        HWRM_CMD_TIMEOUT);
7124         if (rc)
7125                 goto hwrm_func_resc_qcaps_exit;
7126
7127         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7128         if (!all)
7129                 goto hwrm_func_resc_qcaps_exit;
7130
7131         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7132         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7133         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7134         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7135         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7136         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7137         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7138         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7139         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7140         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7141         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7142         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7143         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7144         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7145         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7146         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7147
7148         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7149                 u16 max_msix = le16_to_cpu(resp->max_msix);
7150
7151                 hw_resc->max_nqs = max_msix;
7152                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7153         }
7154
7155         if (BNXT_PF(bp)) {
7156                 struct bnxt_pf_info *pf = &bp->pf;
7157
7158                 pf->vf_resv_strategy =
7159                         le16_to_cpu(resp->vf_reservation_strategy);
7160                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7161                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7162         }
7163 hwrm_func_resc_qcaps_exit:
7164         mutex_unlock(&bp->hwrm_cmd_lock);
7165         return rc;
7166 }
7167
7168 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7169 {
7170         int rc = 0;
7171         struct hwrm_func_qcaps_input req = {0};
7172         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7173         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7174         u32 flags, flags_ext;
7175
7176         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7177         req.fid = cpu_to_le16(0xffff);
7178
7179         mutex_lock(&bp->hwrm_cmd_lock);
7180         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7181         if (rc)
7182                 goto hwrm_func_qcaps_exit;
7183
7184         flags = le32_to_cpu(resp->flags);
7185         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7186                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7187         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7188                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7189         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7190                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7191         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7192                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7193         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7194                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7195         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7196                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7197         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7198                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7199         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7200                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7201
7202         flags_ext = le32_to_cpu(resp->flags_ext);
7203         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7204                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7205
7206         bp->tx_push_thresh = 0;
7207         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7208             BNXT_FW_MAJ(bp) > 217)
7209                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7210
7211         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7212         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7213         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7214         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7215         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7216         if (!hw_resc->max_hw_ring_grps)
7217                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7218         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7219         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7220         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7221
7222         if (BNXT_PF(bp)) {
7223                 struct bnxt_pf_info *pf = &bp->pf;
7224
7225                 pf->fw_fid = le16_to_cpu(resp->fid);
7226                 pf->port_id = le16_to_cpu(resp->port_id);
7227                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7228                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7229                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7230                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7231                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7232                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7233                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7234                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7235                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7236                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7237                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7238                         bp->flags |= BNXT_FLAG_WOL_CAP;
7239         } else {
7240 #ifdef CONFIG_BNXT_SRIOV
7241                 struct bnxt_vf_info *vf = &bp->vf;
7242
7243                 vf->fw_fid = le16_to_cpu(resp->fid);
7244                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7245 #endif
7246         }
7247
7248 hwrm_func_qcaps_exit:
7249         mutex_unlock(&bp->hwrm_cmd_lock);
7250         return rc;
7251 }
7252
7253 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7254
7255 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7256 {
7257         int rc;
7258
7259         rc = __bnxt_hwrm_func_qcaps(bp);
7260         if (rc)
7261                 return rc;
7262         rc = bnxt_hwrm_queue_qportcfg(bp);
7263         if (rc) {
7264                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7265                 return rc;
7266         }
7267         if (bp->hwrm_spec_code >= 0x10803) {
7268                 rc = bnxt_alloc_ctx_mem(bp);
7269                 if (rc)
7270                         return rc;
7271                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7272                 if (!rc)
7273                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7274         }
7275         return 0;
7276 }
7277
7278 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7279 {
7280         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7281         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7282         int rc = 0;
7283         u32 flags;
7284
7285         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7286                 return 0;
7287
7288         resp = bp->hwrm_cmd_resp_addr;
7289         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7290
7291         mutex_lock(&bp->hwrm_cmd_lock);
7292         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7293         if (rc)
7294                 goto hwrm_cfa_adv_qcaps_exit;
7295
7296         flags = le32_to_cpu(resp->flags);
7297         if (flags &
7298             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7299                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7300
7301 hwrm_cfa_adv_qcaps_exit:
7302         mutex_unlock(&bp->hwrm_cmd_lock);
7303         return rc;
7304 }
7305
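     /* Pre-map the firmware health registers that live in GRC space through a
      * fixed BAR0 window (window 3, per the comment below) so the health
      * poller can read them without reprogramming windows at run time.  All
      * GRC registers are expected to share a single window-sized base; if they
      * do not, -ERANGE is returned and the caller disables error recovery.
      */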
7306 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7307 {
7308         struct bnxt_fw_health *fw_health = bp->fw_health;
7309         u32 reg_base = 0xffffffff;
7310         int i;
7311
7312         /* Only pre-map the monitoring GRC registers using window 3 */
7313         for (i = 0; i < 4; i++) {
7314                 u32 reg = fw_health->regs[i];
7315
7316                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7317                         continue;
7318                 if (reg_base == 0xffffffff)
7319                         reg_base = reg & BNXT_GRC_BASE_MASK;
7320                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7321                         return -ERANGE;
7322                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7323                                             (reg & BNXT_GRC_OFFSET_MASK);
7324         }
7325         if (reg_base == 0xffffffff)
7326                 return 0;
7327
7328         writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7329                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7330         return 0;
7331 }
7332
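     /* Fetch the firmware error-recovery parameters: polling and wait periods
      * (in deciseconds, going by the _dsecs naming), the health, heartbeat and
      * reset-count status registers, and the register write sequence the
      * driver replays to reset the chip.  On any failure the ERROR_RECOVERY
      * capability is cleared so the rest of the driver treats recovery as
      * unsupported.
      */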
7333 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7334 {
7335         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7336         struct bnxt_fw_health *fw_health = bp->fw_health;
7337         struct hwrm_error_recovery_qcfg_input req = {0};
7338         int rc, i;
7339
7340         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7341                 return 0;
7342
7343         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7344         mutex_lock(&bp->hwrm_cmd_lock);
7345         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7346         if (rc)
7347                 goto err_recovery_out;
7348         fw_health->flags = le32_to_cpu(resp->flags);
7349         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7350             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7351                 rc = -EINVAL;
7352                 goto err_recovery_out;
7353         }
7354         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7355         fw_health->master_func_wait_dsecs =
7356                 le32_to_cpu(resp->master_func_wait_period);
7357         fw_health->normal_func_wait_dsecs =
7358                 le32_to_cpu(resp->normal_func_wait_period);
7359         fw_health->post_reset_wait_dsecs =
7360                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7361         fw_health->post_reset_max_wait_dsecs =
7362                 le32_to_cpu(resp->max_bailout_time_after_reset);
7363         fw_health->regs[BNXT_FW_HEALTH_REG] =
7364                 le32_to_cpu(resp->fw_health_status_reg);
7365         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7366                 le32_to_cpu(resp->fw_heartbeat_reg);
7367         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7368                 le32_to_cpu(resp->fw_reset_cnt_reg);
7369         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7370                 le32_to_cpu(resp->reset_inprogress_reg);
7371         fw_health->fw_reset_inprog_reg_mask =
7372                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7373         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7374         if (fw_health->fw_reset_seq_cnt >= 16) {
7375                 rc = -EINVAL;
7376                 goto err_recovery_out;
7377         }
7378         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7379                 fw_health->fw_reset_seq_regs[i] =
7380                         le32_to_cpu(resp->reset_reg[i]);
7381                 fw_health->fw_reset_seq_vals[i] =
7382                         le32_to_cpu(resp->reset_reg_val[i]);
7383                 fw_health->fw_reset_seq_delay_msec[i] =
7384                         resp->delay_after_reset[i];
7385         }
7386 err_recovery_out:
7387         mutex_unlock(&bp->hwrm_cmd_lock);
7388         if (!rc)
7389                 rc = bnxt_map_fw_health_regs(bp);
7390         if (rc)
7391                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7392         return rc;
7393 }
7394
7395 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7396 {
7397         struct hwrm_func_reset_input req = {0};
7398
7399         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7400         req.enables = 0;
7401
7402         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7403 }
7404
7405 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7406 {
7407         int rc = 0;
7408         struct hwrm_queue_qportcfg_input req = {0};
7409         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7410         u8 i, j, *qptr;
7411         bool no_rdma;
7412
7413         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7414
7415         mutex_lock(&bp->hwrm_cmd_lock);
7416         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7417         if (rc)
7418                 goto qportcfg_exit;
7419
7420         if (!resp->max_configurable_queues) {
7421                 rc = -EINVAL;
7422                 goto qportcfg_exit;
7423         }
7424         bp->max_tc = resp->max_configurable_queues;
7425         bp->max_lltc = resp->max_configurable_lossless_queues;
7426         if (bp->max_tc > BNXT_MAX_QUEUE)
7427                 bp->max_tc = BNXT_MAX_QUEUE;
7428
7429         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7430         qptr = &resp->queue_id0;
7431         for (i = 0, j = 0; i < bp->max_tc; i++) {
7432                 bp->q_info[j].queue_id = *qptr;
7433                 bp->q_ids[i] = *qptr++;
7434                 bp->q_info[j].queue_profile = *qptr++;
7435                 bp->tc_to_qidx[j] = j;
7436                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7437                     (no_rdma && BNXT_PF(bp)))
7438                         j++;
7439         }
7440         bp->max_q = bp->max_tc;
7441         bp->max_tc = max_t(u8, j, 1);
7442
7443         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7444                 bp->max_tc = 1;
7445
7446         if (bp->max_lltc > bp->max_tc)
7447                 bp->max_lltc = bp->max_tc;
7448
7449 qportcfg_exit:
7450         mutex_unlock(&bp->hwrm_cmd_lock);
7451         return rc;
7452 }
7453
7454 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7455 {
7456         struct hwrm_ver_get_input req = {0};
7457         int rc;
7458
7459         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7460         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7461         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7462         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7463
7464         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7465                                    silent);
7466         return rc;
7467 }
7468
7469 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7470 {
7471         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7472         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7473         u32 dev_caps_cfg, hwrm_ver;
7474         int rc, len;
7475
7476         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7477         mutex_lock(&bp->hwrm_cmd_lock);
7478         rc = __bnxt_hwrm_ver_get(bp, false);
7479         if (rc)
7480                 goto hwrm_ver_get_exit;
7481
7482         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7483
7484         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7485                              resp->hwrm_intf_min_8b << 8 |
7486                              resp->hwrm_intf_upd_8b;
7487         if (resp->hwrm_intf_maj_8b < 1) {
7488                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7489                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7490                             resp->hwrm_intf_upd_8b);
7491                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7492         }
7493
7494         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7495                         HWRM_VERSION_UPDATE;
7496
7497         if (bp->hwrm_spec_code > hwrm_ver)
7498                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7499                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7500                          HWRM_VERSION_UPDATE);
7501         else
7502                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7503                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7504                          resp->hwrm_intf_upd_8b);
7505
7506         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7507         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7508                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7509                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7510                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7511                 len = FW_VER_STR_LEN;
7512         } else {
7513                 fw_maj = resp->hwrm_fw_maj_8b;
7514                 fw_min = resp->hwrm_fw_min_8b;
7515                 fw_bld = resp->hwrm_fw_bld_8b;
7516                 fw_rsv = resp->hwrm_fw_rsvd_8b;
7517                 len = BC_HWRM_STR_LEN;
7518         }
7519         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7520         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7521                  fw_rsv);
7522
7523         if (strlen(resp->active_pkg_name)) {
7524                 int fw_ver_len = strlen(bp->fw_ver_str);
7525
7526                 snprintf(bp->fw_ver_str + fw_ver_len,
7527                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7528                          resp->active_pkg_name);
7529                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7530         }
7531
7532         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7533         if (!bp->hwrm_cmd_timeout)
7534                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7535
7536         if (resp->hwrm_intf_maj_8b >= 1) {
7537                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7538                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7539         }
7540         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7541                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7542
7543         bp->chip_num = le16_to_cpu(resp->chip_num);
7544         bp->chip_rev = resp->chip_rev;
7545         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7546             !resp->chip_metal)
7547                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7548
7549         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7550         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7551             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7552                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7553
7554         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7555                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7556
7557         if (dev_caps_cfg &
7558             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7559                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7560
7561         if (dev_caps_cfg &
7562             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7563                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7564
7565         if (dev_caps_cfg &
7566             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7567                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7568
7569 hwrm_ver_get_exit:
7570         mutex_unlock(&bp->hwrm_cmd_lock);
7571         return rc;
7572 }
7573
7574 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7575 {
7576         struct hwrm_fw_set_time_input req = {0};
7577         struct tm tm;
7578         time64_t now = ktime_get_real_seconds();
7579
7580         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7581             bp->hwrm_spec_code < 0x10400)
7582                 return -EOPNOTSUPP;
7583
7584         time64_to_tm(now, 0, &tm);
7585         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7586         req.year = cpu_to_le16(1900 + tm.tm_year);
7587         req.month = 1 + tm.tm_mon;
7588         req.day = tm.tm_mday;
7589         req.hour = tm.tm_hour;
7590         req.minute = tm.tm_min;
7591         req.second = tm.tm_sec;
7592         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7593 }
7594
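     /* Fold one hardware counter sample into its 64-bit software accumulator.
      * Hardware counters are narrower than 64 bits; "mask" covers the bits the
      * hardware actually implements.  The software value keeps the high-order
      * bits, and a wrap is detected when the new hardware sample is smaller
      * than the previous low-order bits.  Rough example with an 8-bit counter
      * (mask = 0xff): *sw = 0x1fe, new hw = 0x02 -> hw < (*sw & mask), so the
      * result is (0x100 | 0x02) + 0x100 = 0x202.
      */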
7595 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
7596 {
7597         u64 sw_tmp;
7598
7599         sw_tmp = (*sw & ~mask) | hw;
7600         if (hw < (*sw & mask))
7601                 sw_tmp += mask + 1;
7602         WRITE_ONCE(*sw, sw_tmp);
7603 }
7604
7605 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
7606                                     int count, bool ignore_zero)
7607 {
7608         int i;
7609
7610         for (i = 0; i < count; i++) {
7611                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
7612
7613                 if (ignore_zero && !hw)
7614                         continue;
7615
7616                 if (masks[i] == -1ULL)
7617                         sw_stats[i] = hw;
7618                 else
7619                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
7620         }
7621 }
7622
7623 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
7624 {
7625         if (!stats->hw_stats)
7626                 return;
7627
7628         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7629                                 stats->hw_masks, stats->len / 8, false);
7630 }
7631
7632 static void bnxt_accumulate_all_stats(struct bnxt *bp)
7633 {
7634         struct bnxt_stats_mem *ring0_stats;
7635         bool ignore_zero = false;
7636         int i;
7637
7638         /* Chip bug.  Counter intermittently becomes 0. */
7639         if (bp->flags & BNXT_FLAG_CHIP_P5)
7640                 ignore_zero = true;
7641
7642         for (i = 0; i < bp->cp_nr_rings; i++) {
7643                 struct bnxt_napi *bnapi = bp->bnapi[i];
7644                 struct bnxt_cp_ring_info *cpr;
7645                 struct bnxt_stats_mem *stats;
7646
7647                 cpr = &bnapi->cp_ring;
7648                 stats = &cpr->stats;
7649                 if (!i)
7650                         ring0_stats = stats;
7651                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7652                                         ring0_stats->hw_masks,
7653                                         ring0_stats->len / 8, ignore_zero);
7654         }
7655         if (bp->flags & BNXT_FLAG_PORT_STATS) {
7656                 struct bnxt_stats_mem *stats = &bp->port_stats;
7657                 __le64 *hw_stats = stats->hw_stats;
7658                 u64 *sw_stats = stats->sw_stats;
7659                 u64 *masks = stats->hw_masks;
7660                 int cnt;
7661
7662                 cnt = sizeof(struct rx_port_stats) / 8;
7663                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7664
7665                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7666                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7667                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7668                 cnt = sizeof(struct tx_port_stats) / 8;
7669                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7670         }
7671         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
7672                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
7673                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
7674         }
7675 }
7676
7677 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
7678 {
7679         struct bnxt_pf_info *pf = &bp->pf;
7680         struct hwrm_port_qstats_input req = {0};
7681
7682         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7683                 return 0;
7684
7685         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7686                 return -EOPNOTSUPP;
7687
7688         req.flags = flags;
7689         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7690         req.port_id = cpu_to_le16(pf->port_id);
7691         req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
7692                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
7693         req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
7694         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7695 }
7696
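     /* Request extended RX/TX port statistics.  On the plain (flags == 0) call
      * this also issues QUEUE_PRI2COS_QCFG and caches a priority-to-CoS-queue
      * index mapping in bp->pri2cos_idx so per-priority counters can later be
      * reported against the right hardware queue; pri2cos_valid is cleared if
      * the mapping cannot be resolved.
      */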
7697 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
7698 {
7699         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7700         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7701         struct hwrm_port_qstats_ext_input req = {0};
7702         struct bnxt_pf_info *pf = &bp->pf;
7703         u32 tx_stat_size;
7704         int rc;
7705
7706         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7707                 return 0;
7708
7709         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7710                 return -EOPNOTSUPP;
7711
7712         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7713         req.flags = flags;
7714         req.port_id = cpu_to_le16(pf->port_id);
7715         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7716         req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
7717         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
7718                        sizeof(struct tx_port_stats_ext) : 0;
7719         req.tx_stat_size = cpu_to_le16(tx_stat_size);
7720         req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
7721         mutex_lock(&bp->hwrm_cmd_lock);
7722         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7723         if (!rc) {
7724                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7725                 bp->fw_tx_stats_ext_size = tx_stat_size ?
7726                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7727         } else {
7728                 bp->fw_rx_stats_ext_size = 0;
7729                 bp->fw_tx_stats_ext_size = 0;
7730         }
7731         if (flags)
7732                 goto qstats_done;
7733
7734         if (bp->fw_tx_stats_ext_size <=
7735             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7736                 mutex_unlock(&bp->hwrm_cmd_lock);
7737                 bp->pri2cos_valid = 0;
7738                 return rc;
7739         }
7740
7741         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7742         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7743
7744         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7745         if (!rc) {
7746                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7747                 u8 *pri2cos;
7748                 int i, j;
7749
7750                 resp2 = bp->hwrm_cmd_resp_addr;
7751                 pri2cos = &resp2->pri0_cos_queue_id;
7752                 for (i = 0; i < 8; i++) {
7753                         u8 queue_id = pri2cos[i];
7754                         u8 queue_idx;
7755
7756                         /* Per port queue IDs start from 0, 10, 20, etc */
7757                         queue_idx = queue_id % 10;
7758                         if (queue_idx > BNXT_MAX_QUEUE) {
7759                                 bp->pri2cos_valid = false;
7760                                 goto qstats_done;
7761                         }
7762                         for (j = 0; j < bp->max_q; j++) {
7763                                 if (bp->q_ids[j] == queue_id)
7764                                         bp->pri2cos_idx[i] = queue_idx;
7765                         }
7766                 }
7767                 bp->pri2cos_valid = 1;
7768         }
7769 qstats_done:
7770         mutex_unlock(&bp->hwrm_cmd_lock);
7771         return rc;
7772 }
7773
7774 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7775 {
7776         if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
7777                 bnxt_hwrm_tunnel_dst_port_free(
7778                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7779         if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
7780                 bnxt_hwrm_tunnel_dst_port_free(
7781                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7782 }
7783
7784 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7785 {
7786         int rc, i;
7787         u32 tpa_flags = 0;
7788
7789         if (set_tpa)
7790                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7791         else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7792                 return 0;
7793         for (i = 0; i < bp->nr_vnics; i++) {
7794                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7795                 if (rc) {
7796                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
7797                                    i, rc);
7798                         return rc;
7799                 }
7800         }
7801         return 0;
7802 }
7803
7804 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7805 {
7806         int i;
7807
7808         for (i = 0; i < bp->nr_vnics; i++)
7809                 bnxt_hwrm_vnic_set_rss(bp, i, false);
7810 }
7811
7812 static void bnxt_clear_vnic(struct bnxt *bp)
7813 {
7814         if (!bp->vnic_info)
7815                 return;
7816
7817         bnxt_hwrm_clear_vnic_filter(bp);
7818         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7819                 /* clear all RSS settings before freeing the vnic ctx */
7820                 bnxt_hwrm_clear_vnic_rss(bp);
7821                 bnxt_hwrm_vnic_ctx_free(bp);
7822         }
7823         /* before freeing the vnic, undo the vnic tpa settings */
7824         if (bp->flags & BNXT_FLAG_TPA)
7825                 bnxt_set_tpa(bp, false);
7826         bnxt_hwrm_vnic_free(bp);
7827         if (bp->flags & BNXT_FLAG_CHIP_P5)
7828                 bnxt_hwrm_vnic_ctx_free(bp);
7829 }
7830
7831 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7832                                     bool irq_re_init)
7833 {
7834         bnxt_clear_vnic(bp);
7835         bnxt_hwrm_ring_free(bp, close_path);
7836         bnxt_hwrm_ring_grp_free(bp);
7837         if (irq_re_init) {
7838                 bnxt_hwrm_stat_ctx_free(bp);
7839                 bnxt_hwrm_free_tunnel_ports(bp);
7840         }
7841 }
7842
7843 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7844 {
7845         struct hwrm_func_cfg_input req = {0};
7846
7847         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7848         req.fid = cpu_to_le16(0xffff);
7849         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7850         if (br_mode == BRIDGE_MODE_VEB)
7851                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7852         else if (br_mode == BRIDGE_MODE_VEPA)
7853                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7854         else
7855                 return -EINVAL;
7856         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7857 }
7858
7859 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7860 {
7861         struct hwrm_func_cfg_input req = {0};
7862
7863         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7864                 return 0;
7865
7866         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7867         req.fid = cpu_to_le16(0xffff);
7868         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7869         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7870         if (size == 128)
7871                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7872
7873         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7874 }
7875
7876 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7877 {
7878         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7879         int rc;
7880
7881         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7882                 goto skip_rss_ctx;
7883
7884         /* allocate context for vnic */
7885         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7886         if (rc) {
7887                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7888                            vnic_id, rc);
7889                 goto vnic_setup_err;
7890         }
7891         bp->rsscos_nr_ctxs++;
7892
7893         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7894                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7895                 if (rc) {
7896                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7897                                    vnic_id, rc);
7898                         goto vnic_setup_err;
7899                 }
7900                 bp->rsscos_nr_ctxs++;
7901         }
7902
7903 skip_rss_ctx:
7904         /* configure default vnic, ring grp */
7905         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7906         if (rc) {
7907                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7908                            vnic_id, rc);
7909                 goto vnic_setup_err;
7910         }
7911
7912         /* Enable RSS hashing on vnic */
7913         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7914         if (rc) {
7915                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7916                            vnic_id, rc);
7917                 goto vnic_setup_err;
7918         }
7919
7920         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7921                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7922                 if (rc) {
7923                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7924                                    vnic_id, rc);
7925                 }
7926         }
7927
7928 vnic_setup_err:
7929         return rc;
7930 }
7931
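     /* P5 (57500-series) variant of VNIC setup: allocate however many RSS
      * contexts bnxt_get_nr_rss_ctxs() reports are needed for the RX ring
      * count, program RSS for the VNIC, then configure the VNIC itself and,
      * when aggregation rings are in use, header-data split.
      */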
7932 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7933 {
7934         int rc, i, nr_ctxs;
7935
7936         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7937         for (i = 0; i < nr_ctxs; i++) {
7938                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7939                 if (rc) {
7940                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7941                                    vnic_id, i, rc);
7942                         break;
7943                 }
7944                 bp->rsscos_nr_ctxs++;
7945         }
7946         if (i < nr_ctxs)
7947                 return -ENOMEM;
7948
7949         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7950         if (rc) {
7951                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7952                            vnic_id, rc);
7953                 return rc;
7954         }
7955         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7956         if (rc) {
7957                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7958                            vnic_id, rc);
7959                 return rc;
7960         }
7961         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7962                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7963                 if (rc) {
7964                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7965                                    vnic_id, rc);
7966                 }
7967         }
7968         return rc;
7969 }
7970
7971 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7972 {
7973         if (bp->flags & BNXT_FLAG_CHIP_P5)
7974                 return __bnxt_setup_vnic_p5(bp, vnic_id);
7975         else
7976                 return __bnxt_setup_vnic(bp, vnic_id);
7977 }
7978
7979 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7980 {
7981 #ifdef CONFIG_RFS_ACCEL
7982         int i, rc = 0;
7983
7984         if (bp->flags & BNXT_FLAG_CHIP_P5)
7985                 return 0;
7986
7987         for (i = 0; i < bp->rx_nr_rings; i++) {
7988                 struct bnxt_vnic_info *vnic;
7989                 u16 vnic_id = i + 1;
7990                 u16 ring_id = i;
7991
7992                 if (vnic_id >= bp->nr_vnics)
7993                         break;
7994
7995                 vnic = &bp->vnic_info[vnic_id];
7996                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7997                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7998                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7999                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8000                 if (rc) {
8001                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8002                                    vnic_id, rc);
8003                         break;
8004                 }
8005                 rc = bnxt_setup_vnic(bp, vnic_id);
8006                 if (rc)
8007                         break;
8008         }
8009         return rc;
8010 #else
8011         return 0;
8012 #endif
8013 }
8014
8015 /* Allow PF and VF with default VLAN to be in promiscuous mode */
8016 static bool bnxt_promisc_ok(struct bnxt *bp)
8017 {
8018 #ifdef CONFIG_BNXT_SRIOV
8019         if (BNXT_VF(bp) && !bp->vf.vlan)
8020                 return false;
8021 #endif
8022         return true;
8023 }
8024
8025 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8026 {
8027         unsigned int rc = 0;
8028
8029         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8030         if (rc) {
8031                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8032                            rc);
8033                 return rc;
8034         }
8035
8036         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8037         if (rc) {
8038                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8039                            rc);
8040                 return rc;
8041         }
8042         return rc;
8043 }
8044
8045 static int bnxt_cfg_rx_mode(struct bnxt *);
8046 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8047
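     /* Bring the chip to an operational state after open or reset: allocate
      * stat contexts and HW rings, set up the default VNIC (VNIC 0) plus any
      * RFS VNICs, re-apply TPA, program the unicast MAC filter and RX mask for
      * VNIC 0, and set interrupt coalescing.  On failure, everything that was
      * configured so far is torn down via bnxt_hwrm_resource_free().
      */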
8048 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8049 {
8050         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8051         int rc = 0;
8052         unsigned int rx_nr_rings = bp->rx_nr_rings;
8053
8054         if (irq_re_init) {
8055                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8056                 if (rc) {
8057                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8058                                    rc);
8059                         goto err_out;
8060                 }
8061         }
8062
8063         rc = bnxt_hwrm_ring_alloc(bp);
8064         if (rc) {
8065                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8066                 goto err_out;
8067         }
8068
8069         rc = bnxt_hwrm_ring_grp_alloc(bp);
8070         if (rc) {
8071                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8072                 goto err_out;
8073         }
8074
8075         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8076                 rx_nr_rings--;
8077
8078         /* default vnic 0 */
8079         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8080         if (rc) {
8081                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8082                 goto err_out;
8083         }
8084
8085         rc = bnxt_setup_vnic(bp, 0);
8086         if (rc)
8087                 goto err_out;
8088
8089         if (bp->flags & BNXT_FLAG_RFS) {
8090                 rc = bnxt_alloc_rfs_vnics(bp);
8091                 if (rc)
8092                         goto err_out;
8093         }
8094
8095         if (bp->flags & BNXT_FLAG_TPA) {
8096                 rc = bnxt_set_tpa(bp, true);
8097                 if (rc)
8098                         goto err_out;
8099         }
8100
8101         if (BNXT_VF(bp))
8102                 bnxt_update_vf_mac(bp);
8103
8104         /* Filter for default vnic 0 */
8105         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8106         if (rc) {
8107                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8108                 goto err_out;
8109         }
8110         vnic->uc_filter_count = 1;
8111
8112         vnic->rx_mask = 0;
8113         if (bp->dev->flags & IFF_BROADCAST)
8114                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8115
8116         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8117                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8118
8119         if (bp->dev->flags & IFF_ALLMULTI) {
8120                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8121                 vnic->mc_list_count = 0;
8122         } else {
8123                 u32 mask = 0;
8124
8125                 bnxt_mc_list_updated(bp, &mask);
8126                 vnic->rx_mask |= mask;
8127         }
8128
8129         rc = bnxt_cfg_rx_mode(bp);
8130         if (rc)
8131                 goto err_out;
8132
8133         rc = bnxt_hwrm_set_coal(bp);
8134         if (rc)
8135                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8136                                 rc);
8137
8138         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8139                 rc = bnxt_setup_nitroa0_vnic(bp);
8140                 if (rc)
8141                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8142                                    rc);
8143         }
8144
8145         if (BNXT_VF(bp)) {
8146                 bnxt_hwrm_func_qcfg(bp);
8147                 netdev_update_features(bp->dev);
8148         }
8149
8150         return 0;
8151
8152 err_out:
8153         bnxt_hwrm_resource_free(bp, 0, true);
8154
8155         return rc;
8156 }
8157
8158 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8159 {
8160         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8161         return 0;
8162 }
8163
8164 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8165 {
8166         bnxt_init_cp_rings(bp);
8167         bnxt_init_rx_rings(bp);
8168         bnxt_init_tx_rings(bp);
8169         bnxt_init_ring_grps(bp, irq_re_init);
8170         bnxt_init_vnics(bp);
8171
8172         return bnxt_init_chip(bp, irq_re_init);
8173 }
8174
8175 static int bnxt_set_real_num_queues(struct bnxt *bp)
8176 {
8177         int rc;
8178         struct net_device *dev = bp->dev;
8179
8180         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8181                                           bp->tx_nr_rings_xdp);
8182         if (rc)
8183                 return rc;
8184
8185         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8186         if (rc)
8187                 return rc;
8188
8189 #ifdef CONFIG_RFS_ACCEL
8190         if (bp->flags & BNXT_FLAG_RFS)
8191                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8192 #endif
8193
8194         return rc;
8195 }
8196
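/* Trim the requested RX/TX ring counts so that they fit within @max
 * completion rings.  With shared completion rings each count is simply
 * capped at @max; otherwise the larger of the two counts is decremented
 * until rx + tx <= max.  For example, with rx = 6, tx = 4, max = 8 and
 * non-shared rings, the result is rx = 4, tx = 4.
 */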
8197 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8198                            bool shared)
8199 {
8200         int _rx = *rx, _tx = *tx;
8201
8202         if (shared) {
8203                 *rx = min_t(int, _rx, max);
8204                 *tx = min_t(int, _tx, max);
8205         } else {
8206                 if (max < 2)
8207                         return -ENOMEM;
8208
8209                 while (_rx + _tx > max) {
8210                         if (_rx > _tx && _rx > 1)
8211                                 _rx--;
8212                         else if (_tx > 1)
8213                                 _tx--;
8214                 }
8215                 *rx = _rx;
8216                 *tx = _tx;
8217         }
8218         return 0;
8219 }
8220
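/* Map each traffic class to its range of TX queues on the netdev and name
 * every MSI-X vector in the IRQ table ("<dev>-TxRx-<n>", "<dev>-rx-<n>" or
 * "<dev>-tx-<n>"), pointing each entry at the bnxt_msix handler.
 */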
8221 static void bnxt_setup_msix(struct bnxt *bp)
8222 {
8223         const int len = sizeof(bp->irq_tbl[0].name);
8224         struct net_device *dev = bp->dev;
8225         int tcs, i;
8226
8227         tcs = netdev_get_num_tc(dev);
8228         if (tcs) {
8229                 int i, off, count;
8230
8231                 for (i = 0; i < tcs; i++) {
8232                         count = bp->tx_nr_rings_per_tc;
8233                         off = i * count;
8234                         netdev_set_tc_queue(dev, i, count, off);
8235                 }
8236         }
8237
8238         for (i = 0; i < bp->cp_nr_rings; i++) {
8239                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8240                 char *attr;
8241
8242                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8243                         attr = "TxRx";
8244                 else if (i < bp->rx_nr_rings)
8245                         attr = "rx";
8246                 else
8247                         attr = "tx";
8248
8249                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8250                          attr, i);
8251                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8252         }
8253 }
8254
8255 static void bnxt_setup_inta(struct bnxt *bp)
8256 {
8257         const int len = sizeof(bp->irq_tbl[0].name);
8258
8259         if (netdev_get_num_tc(bp->dev))
8260                 netdev_reset_tc(bp->dev);
8261
8262         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8263                  0);
8264         bp->irq_tbl[0].handler = bnxt_inta;
8265 }
8266
8267 static int bnxt_setup_int_mode(struct bnxt *bp)
8268 {
8269         int rc;
8270
8271         if (bp->flags & BNXT_FLAG_USING_MSIX)
8272                 bnxt_setup_msix(bp);
8273         else
8274                 bnxt_setup_inta(bp);
8275
8276         rc = bnxt_set_real_num_queues(bp);
8277         return rc;
8278 }
8279
8280 #ifdef CONFIG_RFS_ACCEL
8281 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8282 {
8283         return bp->hw_resc.max_rsscos_ctxs;
8284 }
8285
8286 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8287 {
8288         return bp->hw_resc.max_vnics;
8289 }
8290 #endif
8291
8292 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8293 {
8294         return bp->hw_resc.max_stat_ctxs;
8295 }
8296
8297 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8298 {
8299         return bp->hw_resc.max_cp_rings;
8300 }
8301
8302 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8303 {
8304         unsigned int cp = bp->hw_resc.max_cp_rings;
8305
8306         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8307                 cp -= bnxt_get_ulp_msix_num(bp);
8308
8309         return cp;
8310 }
8311
8312 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8313 {
8314         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8315
8316         if (bp->flags & BNXT_FLAG_CHIP_P5)
8317                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8318
8319         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8320 }
8321
8322 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8323 {
8324         bp->hw_resc.max_irqs = max_irqs;
8325 }
8326
8327 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8328 {
8329         unsigned int cp;
8330
8331         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8332         if (bp->flags & BNXT_FLAG_CHIP_P5)
8333                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8334         else
8335                 return cp - bp->cp_nr_rings;
8336 }
8337
8338 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8339 {
8340         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8341 }
8342
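/* Return how many MSI-X vectors can be made available for @num additional
 * completion rings (e.g. for an upper-layer driver), beyond the vectors
 * already consumed by the current rings.
 */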
8343 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8344 {
8345         int max_cp = bnxt_get_max_func_cp_rings(bp);
8346         int max_irq = bnxt_get_max_func_irqs(bp);
8347         int total_req = bp->cp_nr_rings + num;
8348         int max_idx, avail_msix;
8349
8350         max_idx = bp->total_irqs;
8351         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8352                 max_idx = min_t(int, bp->total_irqs, max_cp);
8353         avail_msix = max_idx - bp->cp_nr_rings;
8354         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8355                 return avail_msix;
8356
8357         if (max_irq < total_req) {
8358                 num = max_irq - bp->cp_nr_rings;
8359                 if (num <= 0)
8360                         return 0;
8361         }
8362         return num;
8363 }
8364
8365 static int bnxt_get_num_msix(struct bnxt *bp)
8366 {
8367         if (!BNXT_NEW_RM(bp))
8368                 return bnxt_get_max_func_irqs(bp);
8369
8370         return bnxt_nq_rings_in_use(bp);
8371 }
8372
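/* Enable MSI-X: request up to bnxt_get_num_msix() vectors from the PCI
 * core, build the IRQ table from the vectors actually granted, and trim
 * the RX/TX ring counts to fit the vectors left after the ULP share.
 */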
8373 static int bnxt_init_msix(struct bnxt *bp)
8374 {
8375         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8376         struct msix_entry *msix_ent;
8377
8378         total_vecs = bnxt_get_num_msix(bp);
8379         max = bnxt_get_max_func_irqs(bp);
8380         if (total_vecs > max)
8381                 total_vecs = max;
8382
8383         if (!total_vecs)
8384                 return 0;
8385
8386         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8387         if (!msix_ent)
8388                 return -ENOMEM;
8389
8390         for (i = 0; i < total_vecs; i++) {
8391                 msix_ent[i].entry = i;
8392                 msix_ent[i].vector = 0;
8393         }
8394
8395         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8396                 min = 2;
8397
8398         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8399         ulp_msix = bnxt_get_ulp_msix_num(bp);
8400         if (total_vecs < 0 || total_vecs < ulp_msix) {
8401                 rc = -ENODEV;
8402                 goto msix_setup_exit;
8403         }
8404
8405         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8406         if (bp->irq_tbl) {
8407                 for (i = 0; i < total_vecs; i++)
8408                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8409
8410                 bp->total_irqs = total_vecs;
8411                 /* Trim rings based on the number of vectors allocated */
8412                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8413                                      total_vecs - ulp_msix, min == 1);
8414                 if (rc)
8415                         goto msix_setup_exit;
8416
8417                 bp->cp_nr_rings = (min == 1) ?
8418                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8419                                   bp->tx_nr_rings + bp->rx_nr_rings;
8420
8421         } else {
8422                 rc = -ENOMEM;
8423                 goto msix_setup_exit;
8424         }
8425         bp->flags |= BNXT_FLAG_USING_MSIX;
8426         kfree(msix_ent);
8427         return 0;
8428
8429 msix_setup_exit:
8430         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8431         kfree(bp->irq_tbl);
8432         bp->irq_tbl = NULL;
8433         pci_disable_msix(bp->pdev);
8434         kfree(msix_ent);
8435         return rc;
8436 }
8437
8438 static int bnxt_init_inta(struct bnxt *bp)
8439 {
8440         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8441         if (!bp->irq_tbl)
8442                 return -ENOMEM;
8443
8444         bp->total_irqs = 1;
8445         bp->rx_nr_rings = 1;
8446         bp->tx_nr_rings = 1;
8447         bp->cp_nr_rings = 1;
8448         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8449         bp->irq_tbl[0].vector = bp->pdev->irq;
8450         return 0;
8451 }
8452
8453 static int bnxt_init_int_mode(struct bnxt *bp)
8454 {
8455         int rc = 0;
8456
8457         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8458                 rc = bnxt_init_msix(bp);
8459
8460         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8461                 /* fallback to INTA */
8462                 rc = bnxt_init_inta(bp);
8463         }
8464         return rc;
8465 }
8466
8467 static void bnxt_clear_int_mode(struct bnxt *bp)
8468 {
8469         if (bp->flags & BNXT_FLAG_USING_MSIX)
8470                 pci_disable_msix(bp->pdev);
8471
8472         kfree(bp->irq_tbl);
8473         bp->irq_tbl = NULL;
8474         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8475 }
8476
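/* Reserve rings with the firmware.  If the required number of MSI-X
 * vectors has changed under the new resource manager, interrupts are torn
 * down and re-initialized around the reservation, with ULP IRQs stopped
 * and restarted accordingly.
 */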
8477 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8478 {
8479         int tcs = netdev_get_num_tc(bp->dev);
8480         bool irq_cleared = false;
8481         int rc;
8482
8483         if (!bnxt_need_reserve_rings(bp))
8484                 return 0;
8485
8486         if (irq_re_init && BNXT_NEW_RM(bp) &&
8487             bnxt_get_num_msix(bp) != bp->total_irqs) {
8488                 bnxt_ulp_irq_stop(bp);
8489                 bnxt_clear_int_mode(bp);
8490                 irq_cleared = true;
8491         }
8492         rc = __bnxt_reserve_rings(bp);
8493         if (irq_cleared) {
8494                 if (!rc)
8495                         rc = bnxt_init_int_mode(bp);
8496                 bnxt_ulp_irq_restart(bp, rc);
8497         }
8498         if (!netif_is_rxfh_configured(bp->dev))
8499                 bnxt_set_dflt_rss_indir_tbl(bp);
8500
8501         if (rc) {
8502                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8503                 return rc;
8504         }
8505         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8506                 netdev_err(bp->dev, "tx ring reservation failure\n");
8507                 netdev_reset_tc(bp->dev);
8508                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8509                 return -ENOMEM;
8510         }
8511         return 0;
8512 }
8513
8514 static void bnxt_free_irq(struct bnxt *bp)
8515 {
8516         struct bnxt_irq *irq;
8517         int i;
8518
8519 #ifdef CONFIG_RFS_ACCEL
8520         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8521         bp->dev->rx_cpu_rmap = NULL;
8522 #endif
8523         if (!bp->irq_tbl || !bp->bnapi)
8524                 return;
8525
8526         for (i = 0; i < bp->cp_nr_rings; i++) {
8527                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8528
8529                 irq = &bp->irq_tbl[map_idx];
8530                 if (irq->requested) {
8531                         if (irq->have_cpumask) {
8532                                 irq_set_affinity_hint(irq->vector, NULL);
8533                                 free_cpumask_var(irq->cpu_mask);
8534                                 irq->have_cpumask = 0;
8535                         }
8536                         free_irq(irq->vector, bp->bnapi[i]);
8537                 }
8538
8539                 irq->requested = 0;
8540         }
8541 }
8542
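/* Request an IRQ for every completion ring, register RX vectors with the
 * aRFS CPU rmap and set a NUMA-local CPU affinity hint on each vector.
 */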
8543 static int bnxt_request_irq(struct bnxt *bp)
8544 {
8545         int i, j, rc = 0;
8546         unsigned long flags = 0;
8547 #ifdef CONFIG_RFS_ACCEL
8548         struct cpu_rmap *rmap;
8549 #endif
8550
8551         rc = bnxt_setup_int_mode(bp);
8552         if (rc) {
8553                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8554                            rc);
8555                 return rc;
8556         }
8557 #ifdef CONFIG_RFS_ACCEL
8558         rmap = bp->dev->rx_cpu_rmap;
8559 #endif
8560         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8561                 flags = IRQF_SHARED;
8562
8563         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8564                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8565                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8566
8567 #ifdef CONFIG_RFS_ACCEL
8568                 if (rmap && bp->bnapi[i]->rx_ring) {
8569                         rc = irq_cpu_rmap_add(rmap, irq->vector);
8570                         if (rc)
8571                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8572                                             j);
8573                         j++;
8574                 }
8575 #endif
8576                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8577                                  bp->bnapi[i]);
8578                 if (rc)
8579                         break;
8580
8581                 irq->requested = 1;
8582
8583                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8584                         int numa_node = dev_to_node(&bp->pdev->dev);
8585
8586                         irq->have_cpumask = 1;
8587                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8588                                         irq->cpu_mask);
8589                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8590                         if (rc) {
8591                                 netdev_warn(bp->dev,
8592                                             "Set affinity failed, IRQ = %d\n",
8593                                             irq->vector);
8594                                 break;
8595                         }
8596                 }
8597         }
8598         return rc;
8599 }
8600
8601 static void bnxt_del_napi(struct bnxt *bp)
8602 {
8603         int i;
8604
8605         if (!bp->bnapi)
8606                 return;
8607
8608         for (i = 0; i < bp->cp_nr_rings; i++) {
8609                 struct bnxt_napi *bnapi = bp->bnapi[i];
8610
8611                 napi_hash_del(&bnapi->napi);
8612                 netif_napi_del(&bnapi->napi);
8613         }
8614         /* We called napi_hash_del() before netif_napi_del(), so we need
8615          * to respect an RCU grace period before freeing napi structures.
8616          */
8617         synchronize_net();
8618 }
8619
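/* Register a NAPI instance per completion ring, selecting the poll
 * routine by chip type (bnxt_poll, bnxt_poll_p5 or bnxt_poll_nitroa0).
 */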
8620 static void bnxt_init_napi(struct bnxt *bp)
8621 {
8622         int i;
8623         unsigned int cp_nr_rings = bp->cp_nr_rings;
8624         struct bnxt_napi *bnapi;
8625
8626         if (bp->flags & BNXT_FLAG_USING_MSIX) {
8627                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8628
8629                 if (bp->flags & BNXT_FLAG_CHIP_P5)
8630                         poll_fn = bnxt_poll_p5;
8631                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8632                         cp_nr_rings--;
8633                 for (i = 0; i < cp_nr_rings; i++) {
8634                         bnapi = bp->bnapi[i];
8635                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8636                 }
8637                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8638                         bnapi = bp->bnapi[cp_nr_rings];
8639                         netif_napi_add(bp->dev, &bnapi->napi,
8640                                        bnxt_poll_nitroa0, 64);
8641                 }
8642         } else {
8643                 bnapi = bp->bnapi[0];
8644                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8645         }
8646 }
8647
8648 static void bnxt_disable_napi(struct bnxt *bp)
8649 {
8650         int i;
8651
8652         if (!bp->bnapi)
8653                 return;
8654
8655         for (i = 0; i < bp->cp_nr_rings; i++) {
8656                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8657
8658                 if (bp->bnapi[i]->rx_ring)
8659                         cancel_work_sync(&cpr->dim.work);
8660
8661                 napi_disable(&bp->bnapi[i]->napi);
8662         }
8663 }
8664
8665 static void bnxt_enable_napi(struct bnxt *bp)
8666 {
8667         int i;
8668
8669         for (i = 0; i < bp->cp_nr_rings; i++) {
8670                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8671                 bp->bnapi[i]->in_reset = false;
8672
8673                 if (bp->bnapi[i]->rx_ring) {
8674                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8675                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8676                 }
8677                 napi_enable(&bp->bnapi[i]->napi);
8678         }
8679 }
8680
8681 void bnxt_tx_disable(struct bnxt *bp)
8682 {
8683         int i;
8684         struct bnxt_tx_ring_info *txr;
8685
8686         if (bp->tx_ring) {
8687                 for (i = 0; i < bp->tx_nr_rings; i++) {
8688                         txr = &bp->tx_ring[i];
8689                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
8690                 }
8691         }
8692         /* Stop all TX queues */
8693         netif_tx_disable(bp->dev);
8694         netif_carrier_off(bp->dev);
8695 }
8696
8697 void bnxt_tx_enable(struct bnxt *bp)
8698 {
8699         int i;
8700         struct bnxt_tx_ring_info *txr;
8701
8702         for (i = 0; i < bp->tx_nr_rings; i++) {
8703                 txr = &bp->tx_ring[i];
8704                 txr->dev_state = 0;
8705         }
8706         netif_tx_wake_all_queues(bp->dev);
8707         if (bp->link_info.link_up)
8708                 netif_carrier_on(bp->dev);
8709 }
8710
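/* Log the current link state: speed, duplex, flow control and, when
 * supported, EEE and FEC status.
 */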
8711 static void bnxt_report_link(struct bnxt *bp)
8712 {
8713         if (bp->link_info.link_up) {
8714                 const char *duplex;
8715                 const char *flow_ctrl;
8716                 u32 speed;
8717                 u16 fec;
8718
8719                 netif_carrier_on(bp->dev);
8720                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8721                         duplex = "full";
8722                 else
8723                         duplex = "half";
8724                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8725                         flow_ctrl = "ON - receive & transmit";
8726                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8727                         flow_ctrl = "ON - transmit";
8728                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8729                         flow_ctrl = "ON - receive";
8730                 else
8731                         flow_ctrl = "none";
8732                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8733                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8734                             speed, duplex, flow_ctrl);
8735                 if (bp->flags & BNXT_FLAG_EEE_CAP)
8736                         netdev_info(bp->dev, "EEE is %s\n",
8737                                     bp->eee.eee_active ? "active" :
8738                                                          "not active");
8739                 fec = bp->link_info.fec_cfg;
8740                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8741                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8742                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8743                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8744                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8745         } else {
8746                 netif_carrier_off(bp->dev);
8747                 netdev_err(bp->dev, "NIC Link is Down\n");
8748         }
8749 }
8750
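/* Query PHY capabilities (HWRM_PORT_PHY_QCAPS) and cache EEE support,
 * loopback test capabilities, supported auto-negotiation speeds and the
 * port count.
 */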
8751 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8752 {
8753         int rc = 0;
8754         struct hwrm_port_phy_qcaps_input req = {0};
8755         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8756         struct bnxt_link_info *link_info = &bp->link_info;
8757
8758         bp->flags &= ~BNXT_FLAG_EEE_CAP;
8759         if (bp->test_info)
8760                 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
8761                                           BNXT_TEST_FL_AN_PHY_LPBK);
8762         if (bp->hwrm_spec_code < 0x10201)
8763                 return 0;
8764
8765         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8766
8767         mutex_lock(&bp->hwrm_cmd_lock);
8768         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8769         if (rc)
8770                 goto hwrm_phy_qcaps_exit;
8771
8772         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8773                 struct ethtool_eee *eee = &bp->eee;
8774                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8775
8776                 bp->flags |= BNXT_FLAG_EEE_CAP;
8777                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8778                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8779                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8780                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8781                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8782         }
8783         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8784                 if (bp->test_info)
8785                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8786         }
8787         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
8788                 if (bp->test_info)
8789                         bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
8790         }
8791         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
8792                 if (BNXT_PF(bp))
8793                         bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
8794         }
8795         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
8796                 bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
8797
8798         if (resp->supported_speeds_auto_mode)
8799                 link_info->support_auto_speeds =
8800                         le16_to_cpu(resp->supported_speeds_auto_mode);
8801
8802         bp->port_count = resp->port_cnt;
8803
8804 hwrm_phy_qcaps_exit:
8805         mutex_unlock(&bp->hwrm_cmd_lock);
8806         return rc;
8807 }
8808
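/* Refresh the cached link state from HWRM_PORT_PHY_QCFG.  When
 * @chng_link_state is set, report link up/down transitions; also drop any
 * advertised speeds that the PHY no longer supports.
 */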
8809 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8810 {
8811         int rc = 0;
8812         struct bnxt_link_info *link_info = &bp->link_info;
8813         struct hwrm_port_phy_qcfg_input req = {0};
8814         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8815         u8 link_up = link_info->link_up;
8816         u16 diff;
8817
8818         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8819
8820         mutex_lock(&bp->hwrm_cmd_lock);
8821         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8822         if (rc) {
8823                 mutex_unlock(&bp->hwrm_cmd_lock);
8824                 return rc;
8825         }
8826
8827         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8828         link_info->phy_link_status = resp->link;
8829         link_info->duplex = resp->duplex_cfg;
8830         if (bp->hwrm_spec_code >= 0x10800)
8831                 link_info->duplex = resp->duplex_state;
8832         link_info->pause = resp->pause;
8833         link_info->auto_mode = resp->auto_mode;
8834         link_info->auto_pause_setting = resp->auto_pause;
8835         link_info->lp_pause = resp->link_partner_adv_pause;
8836         link_info->force_pause_setting = resp->force_pause;
8837         link_info->duplex_setting = resp->duplex_cfg;
8838         if (link_info->phy_link_status == BNXT_LINK_LINK)
8839                 link_info->link_speed = le16_to_cpu(resp->link_speed);
8840         else
8841                 link_info->link_speed = 0;
8842         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8843         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8844         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8845         link_info->lp_auto_link_speeds =
8846                 le16_to_cpu(resp->link_partner_adv_speeds);
8847         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8848         link_info->phy_ver[0] = resp->phy_maj;
8849         link_info->phy_ver[1] = resp->phy_min;
8850         link_info->phy_ver[2] = resp->phy_bld;
8851         link_info->media_type = resp->media_type;
8852         link_info->phy_type = resp->phy_type;
8853         link_info->transceiver = resp->xcvr_pkg_type;
8854         link_info->phy_addr = resp->eee_config_phy_addr &
8855                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8856         link_info->module_status = resp->module_status;
8857
8858         if (bp->flags & BNXT_FLAG_EEE_CAP) {
8859                 struct ethtool_eee *eee = &bp->eee;
8860                 u16 fw_speeds;
8861
8862                 eee->eee_active = 0;
8863                 if (resp->eee_config_phy_addr &
8864                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8865                         eee->eee_active = 1;
8866                         fw_speeds = le16_to_cpu(
8867                                 resp->link_partner_adv_eee_link_speed_mask);
8868                         eee->lp_advertised =
8869                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8870                 }
8871
8872                 /* Pull initial EEE config */
8873                 if (!chng_link_state) {
8874                         if (resp->eee_config_phy_addr &
8875                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8876                                 eee->eee_enabled = 1;
8877
8878                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8879                         eee->advertised =
8880                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8881
8882                         if (resp->eee_config_phy_addr &
8883                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8884                                 __le32 tmr;
8885
8886                                 eee->tx_lpi_enabled = 1;
8887                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8888                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8889                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8890                         }
8891                 }
8892         }
8893
8894         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8895         if (bp->hwrm_spec_code >= 0x10504)
8896                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8897
8898         /* TODO: need to add more logic to report VF link */
8899         if (chng_link_state) {
8900                 if (link_info->phy_link_status == BNXT_LINK_LINK)
8901                         link_info->link_up = 1;
8902                 else
8903                         link_info->link_up = 0;
8904                 if (link_up != link_info->link_up)
8905                         bnxt_report_link(bp);
8906         } else {
8907                 /* always link down if not required to update link state */
8908                 link_info->link_up = 0;
8909         }
8910         mutex_unlock(&bp->hwrm_cmd_lock);
8911
8912         if (!BNXT_PHY_CFG_ABLE(bp))
8913                 return 0;
8914
8915         diff = link_info->support_auto_speeds ^ link_info->advertising;
8916         if ((link_info->support_auto_speeds | diff) !=
8917             link_info->support_auto_speeds) {
8918                 /* An advertised speed is no longer supported, so we need to
8919                  * update the advertisement settings.  Caller holds RTNL
8920                  * so we can modify link settings.
8921                  */
8922                 link_info->advertising = link_info->support_auto_speeds;
8923                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8924                         bnxt_hwrm_set_link_setting(bp, true, false);
8925         }
8926         return 0;
8927 }
8928
8929 static void bnxt_get_port_module_status(struct bnxt *bp)
8930 {
8931         struct bnxt_link_info *link_info = &bp->link_info;
8932         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8933         u8 module_status;
8934
8935         if (bnxt_update_link(bp, true))
8936                 return;
8937
8938         module_status = link_info->module_status;
8939         switch (module_status) {
8940         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8941         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8942         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8943                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8944                             bp->pf.port_id);
8945                 if (bp->hwrm_spec_code >= 0x10201) {
8946                         netdev_warn(bp->dev, "Module part number %s\n",
8947                                     resp->phy_vendor_partnumber);
8948                 }
8949                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8950                         netdev_warn(bp->dev, "TX is disabled\n");
8951                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8952                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8953         }
8954 }
8955
8956 static void
8957 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8958 {
8959         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8960                 if (bp->hwrm_spec_code >= 0x10201)
8961                         req->auto_pause =
8962                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8963                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8964                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8965                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8966                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8967                 req->enables |=
8968                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8969         } else {
8970                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8971                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8972                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8973                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8974                 req->enables |=
8975                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8976                 if (bp->hwrm_spec_code >= 0x10201) {
8977                         req->auto_pause = req->force_pause;
8978                         req->enables |= cpu_to_le32(
8979                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8980                 }
8981         }
8982 }
8983
8984 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8985                                       struct hwrm_port_phy_cfg_input *req)
8986 {
8987         u8 autoneg = bp->link_info.autoneg;
8988         u16 fw_link_speed = bp->link_info.req_link_speed;
8989         u16 advertising = bp->link_info.advertising;
8990
8991         if (autoneg & BNXT_AUTONEG_SPEED) {
8992                 req->auto_mode |=
8993                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8994
8995                 req->enables |= cpu_to_le32(
8996                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8997                 req->auto_link_speed_mask = cpu_to_le16(advertising);
8998
8999                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9000                 req->flags |=
9001                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9002         } else {
9003                 req->force_link_speed = cpu_to_le16(fw_link_speed);
9004                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9005         }
9006
9007         /* tell chimp that the setting takes effect immediately */
9008         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9009 }
9010
9011 int bnxt_hwrm_set_pause(struct bnxt *bp)
9012 {
9013         struct hwrm_port_phy_cfg_input req = {0};
9014         int rc;
9015
9016         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9017         bnxt_hwrm_set_pause_common(bp, &req);
9018
9019         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9020             bp->link_info.force_link_chng)
9021                 bnxt_hwrm_set_link_common(bp, &req);
9022
9023         mutex_lock(&bp->hwrm_cmd_lock);
9024         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9025         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9026                 /* since changing the pause setting doesn't trigger any link
9027                  * change event, the driver needs to update the current pause
9028                  * result upon successful return of the phy_cfg command
9029                  */
9030                 bp->link_info.pause =
9031                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9032                 bp->link_info.auto_pause_setting = 0;
9033                 if (!bp->link_info.force_link_chng)
9034                         bnxt_report_link(bp);
9035         }
9036         bp->link_info.force_link_chng = false;
9037         mutex_unlock(&bp->hwrm_cmd_lock);
9038         return rc;
9039 }
9040
9041 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9042                               struct hwrm_port_phy_cfg_input *req)
9043 {
9044         struct ethtool_eee *eee = &bp->eee;
9045
9046         if (eee->eee_enabled) {
9047                 u16 eee_speeds;
9048                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9049
9050                 if (eee->tx_lpi_enabled)
9051                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9052                 else
9053                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9054
9055                 req->flags |= cpu_to_le32(flags);
9056                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9057                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9058                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9059         } else {
9060                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9061         }
9062 }
9063
9064 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9065 {
9066         struct hwrm_port_phy_cfg_input req = {0};
9067
9068         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9069         if (set_pause)
9070                 bnxt_hwrm_set_pause_common(bp, &req);
9071
9072         bnxt_hwrm_set_link_common(bp, &req);
9073
9074         if (set_eee)
9075                 bnxt_hwrm_set_eee(bp, &req);
9076         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9077 }
9078
9079 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9080 {
9081         struct hwrm_port_phy_cfg_input req = {0};
9082
9083         if (!BNXT_SINGLE_PF(bp))
9084                 return 0;
9085
9086         if (pci_num_vf(bp->pdev))
9087                 return 0;
9088
9089         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9090         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9091         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9092 }
9093
9094 static int bnxt_fw_init_one(struct bnxt *bp);
9095
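/* Tell the firmware that the interface is going up or down
 * (HWRM_FUNC_DRV_IF_CHANGE).  On the "up" path, react to a reported
 * resource change or a completed hot firmware reset by re-initializing
 * firmware state and/or resetting the driver's resource reservations.
 */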
9096 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9097 {
9098         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9099         struct hwrm_func_drv_if_change_input req = {0};
9100         bool resc_reinit = false, fw_reset = false;
9101         u32 flags = 0;
9102         int rc;
9103
9104         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9105                 return 0;
9106
9107         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9108         if (up)
9109                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9110         mutex_lock(&bp->hwrm_cmd_lock);
9111         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9112         if (!rc)
9113                 flags = le32_to_cpu(resp->flags);
9114         mutex_unlock(&bp->hwrm_cmd_lock);
9115         if (rc)
9116                 return rc;
9117
9118         if (!up)
9119                 return 0;
9120
9121         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9122                 resc_reinit = true;
9123         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9124                 fw_reset = true;
9125
9126         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9127                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9128                 return -ENODEV;
9129         }
9130         if (resc_reinit || fw_reset) {
9131                 if (fw_reset) {
9132                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9133                                 bnxt_ulp_stop(bp);
9134                         bnxt_free_ctx_mem(bp);
9135                         kfree(bp->ctx);
9136                         bp->ctx = NULL;
9137                         bnxt_dcb_free(bp);
9138                         rc = bnxt_fw_init_one(bp);
9139                         if (rc) {
9140                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9141                                 return rc;
9142                         }
9143                         bnxt_clear_int_mode(bp);
9144                         rc = bnxt_init_int_mode(bp);
9145                         if (rc) {
9146                                 netdev_err(bp->dev, "init int mode failed\n");
9147                                 return rc;
9148                         }
9149                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9150                 }
9151                 if (BNXT_NEW_RM(bp)) {
9152                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9153
9154                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9155                         hw_resc->resv_cp_rings = 0;
9156                         hw_resc->resv_stat_ctxs = 0;
9157                         hw_resc->resv_irqs = 0;
9158                         hw_resc->resv_tx_rings = 0;
9159                         hw_resc->resv_rx_rings = 0;
9160                         hw_resc->resv_hw_ring_grps = 0;
9161                         hw_resc->resv_vnics = 0;
9162                         if (!fw_reset) {
9163                                 bp->tx_nr_rings = 0;
9164                                 bp->rx_nr_rings = 0;
9165                         }
9166                 }
9167         }
9168         return 0;
9169 }
9170
9171 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9172 {
9173         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9174         struct hwrm_port_led_qcaps_input req = {0};
9175         struct bnxt_pf_info *pf = &bp->pf;
9176         int rc;
9177
9178         bp->num_leds = 0;
9179         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9180                 return 0;
9181
9182         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9183         req.port_id = cpu_to_le16(pf->port_id);
9184         mutex_lock(&bp->hwrm_cmd_lock);
9185         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9186         if (rc) {
9187                 mutex_unlock(&bp->hwrm_cmd_lock);
9188                 return rc;
9189         }
9190         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9191                 int i;
9192
9193                 bp->num_leds = resp->num_leds;
9194                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9195                                                  bp->num_leds);
9196                 for (i = 0; i < bp->num_leds; i++) {
9197                         struct bnxt_led_info *led = &bp->leds[i];
9198                         __le16 caps = led->led_state_caps;
9199
9200                         if (!led->led_group_id ||
9201                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9202                                 bp->num_leds = 0;
9203                                 break;
9204                         }
9205                 }
9206         }
9207         mutex_unlock(&bp->hwrm_cmd_lock);
9208         return 0;
9209 }
9210
9211 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9212 {
9213         struct hwrm_wol_filter_alloc_input req = {0};
9214         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9215         int rc;
9216
9217         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9218         req.port_id = cpu_to_le16(bp->pf.port_id);
9219         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9220         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9221         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9222         mutex_lock(&bp->hwrm_cmd_lock);
9223         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9224         if (!rc)
9225                 bp->wol_filter_id = resp->wol_filter_id;
9226         mutex_unlock(&bp->hwrm_cmd_lock);
9227         return rc;
9228 }
9229
9230 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9231 {
9232         struct hwrm_wol_filter_free_input req = {0};
9233
9234         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9235         req.port_id = cpu_to_le16(bp->pf.port_id);
9236         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9237         req.wol_filter_id = bp->wol_filter_id;
9238         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9239 }
9240
9241 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9242 {
9243         struct hwrm_wol_filter_qcfg_input req = {0};
9244         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9245         u16 next_handle = 0;
9246         int rc;
9247
9248         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9249         req.port_id = cpu_to_le16(bp->pf.port_id);
9250         req.handle = cpu_to_le16(handle);
9251         mutex_lock(&bp->hwrm_cmd_lock);
9252         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9253         if (!rc) {
9254                 next_handle = le16_to_cpu(resp->next_handle);
9255                 if (next_handle != 0) {
9256                         if (resp->wol_type ==
9257                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9258                                 bp->wol = 1;
9259                                 bp->wol_filter_id = resp->wol_filter_id;
9260                         }
9261                 }
9262         }
9263         mutex_unlock(&bp->hwrm_cmd_lock);
9264         return next_handle;
9265 }
9266
9267 static void bnxt_get_wol_settings(struct bnxt *bp)
9268 {
9269         u16 handle = 0;
9270
9271         bp->wol = 0;
9272         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9273                 return;
9274
9275         do {
9276                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9277         } while (handle && handle != 0xffff);
9278 }
9279
9280 #ifdef CONFIG_BNXT_HWMON
9281 static ssize_t bnxt_show_temp(struct device *dev,
9282                               struct device_attribute *devattr, char *buf)
9283 {
9284         struct hwrm_temp_monitor_query_input req = {0};
9285         struct hwrm_temp_monitor_query_output *resp;
9286         struct bnxt *bp = dev_get_drvdata(dev);
9287         u32 temp = 0;
9288
9289         resp = bp->hwrm_cmd_resp_addr;
9290         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9291         mutex_lock(&bp->hwrm_cmd_lock);
9292         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
9293                 temp = resp->temp * 1000; /* display millidegree */
9294         mutex_unlock(&bp->hwrm_cmd_lock);
9295
9296         return sprintf(buf, "%u\n", temp);
9297 }
9298 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9299
9300 static struct attribute *bnxt_attrs[] = {
9301         &sensor_dev_attr_temp1_input.dev_attr.attr,
9302         NULL
9303 };
9304 ATTRIBUTE_GROUPS(bnxt);
9305
9306 static void bnxt_hwmon_close(struct bnxt *bp)
9307 {
9308         if (bp->hwmon_dev) {
9309                 hwmon_device_unregister(bp->hwmon_dev);
9310                 bp->hwmon_dev = NULL;
9311         }
9312 }
9313
9314 static void bnxt_hwmon_open(struct bnxt *bp)
9315 {
9316         struct pci_dev *pdev = bp->pdev;
9317
9318         if (bp->hwmon_dev)
9319                 return;
9320
9321         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9322                                                           DRV_MODULE_NAME, bp,
9323                                                           bnxt_groups);
9324         if (IS_ERR(bp->hwmon_dev)) {
9325                 bp->hwmon_dev = NULL;
9326                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9327         }
9328 }
9329 #else
9330 static void bnxt_hwmon_close(struct bnxt *bp)
9331 {
9332 }
9333
9334 static void bnxt_hwmon_open(struct bnxt *bp)
9335 {
9336 }
9337 #endif
9338
9339 static bool bnxt_eee_config_ok(struct bnxt *bp)
9340 {
9341         struct ethtool_eee *eee = &bp->eee;
9342         struct bnxt_link_info *link_info = &bp->link_info;
9343
9344         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9345                 return true;
9346
9347         if (eee->eee_enabled) {
9348                 u32 advertising =
9349                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9350
9351                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9352                         eee->eee_enabled = 0;
9353                         return false;
9354                 }
9355                 if (eee->advertised & ~advertising) {
9356                         eee->advertised = advertising & eee->supported;
9357                         return false;
9358                 }
9359         }
9360         return true;
9361 }
9362
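/* Compare the requested autoneg, pause, speed and EEE settings against
 * the current PHY state and issue HWRM_PORT_PHY_CFG only when something
 * actually needs to change.
 */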
9363 static int bnxt_update_phy_setting(struct bnxt *bp)
9364 {
9365         int rc;
9366         bool update_link = false;
9367         bool update_pause = false;
9368         bool update_eee = false;
9369         struct bnxt_link_info *link_info = &bp->link_info;
9370
9371         rc = bnxt_update_link(bp, true);
9372         if (rc) {
9373                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9374                            rc);
9375                 return rc;
9376         }
9377         if (!BNXT_SINGLE_PF(bp))
9378                 return 0;
9379
9380         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9381             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9382             link_info->req_flow_ctrl)
9383                 update_pause = true;
9384         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9385             link_info->force_pause_setting != link_info->req_flow_ctrl)
9386                 update_pause = true;
9387         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9388                 if (BNXT_AUTO_MODE(link_info->auto_mode))
9389                         update_link = true;
9390                 if (link_info->req_link_speed != link_info->force_link_speed)
9391                         update_link = true;
9392                 if (link_info->req_duplex != link_info->duplex_setting)
9393                         update_link = true;
9394         } else {
9395                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9396                         update_link = true;
9397                 if (link_info->advertising != link_info->auto_link_speeds)
9398                         update_link = true;
9399         }
9400
9401         /* The last close may have shut down the link, so we need to call
9402          * PHY_CFG to bring it back up.
9403          */
9404         if (!bp->link_info.link_up)
9405                 update_link = true;
9406
9407         if (!bnxt_eee_config_ok(bp))
9408                 update_eee = true;
9409
9410         if (update_link)
9411                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9412         else if (update_pause)
9413                 rc = bnxt_hwrm_set_pause(bp);
9414         if (rc) {
9415                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9416                            rc);
9417                 return rc;
9418         }
9419
9420         return rc;
9421 }
9422
9423 /* Common routine to pre-map certain register blocks to different GRC windows.
9424  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9425  * in the PF and 3 windows in the VF can be customized to map different
9426  * register blocks.
9427  */
9428 static void bnxt_preset_reg_win(struct bnxt *bp)
9429 {
9430         if (BNXT_PF(bp)) {
9431                 /* CAG registers map to GRC window #4 */
9432                 writel(BNXT_CAG_REG_BASE,
9433                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9434         }
9435 }
9436
9437 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9438
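/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * program the chip, bring the PHY up and start the TX queues and timer.
 * Failures unwind in reverse order.
 */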
9439 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9440 {
9441         int rc = 0;
9442
9443         bnxt_preset_reg_win(bp);
9444         netif_carrier_off(bp->dev);
9445         if (irq_re_init) {
9446                 /* Reserve rings now if none were reserved at driver probe. */
9447                 rc = bnxt_init_dflt_ring_mode(bp);
9448                 if (rc) {
9449                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9450                         return rc;
9451                 }
9452         }
9453         rc = bnxt_reserve_rings(bp, irq_re_init);
9454         if (rc)
9455                 return rc;
9456         if ((bp->flags & BNXT_FLAG_RFS) &&
9457             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9458                 /* disable RFS if falling back to INTA */
9459                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9460                 bp->flags &= ~BNXT_FLAG_RFS;
9461         }
9462
9463         rc = bnxt_alloc_mem(bp, irq_re_init);
9464         if (rc) {
9465                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9466                 goto open_err_free_mem;
9467         }
9468
9469         if (irq_re_init) {
9470                 bnxt_init_napi(bp);
9471                 rc = bnxt_request_irq(bp);
9472                 if (rc) {
9473                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9474                         goto open_err_irq;
9475                 }
9476         }
9477
9478         bnxt_enable_napi(bp);
9479         bnxt_debug_dev_init(bp);
9480
9481         rc = bnxt_init_nic(bp, irq_re_init);
9482         if (rc) {
9483                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9484                 goto open_err;
9485         }
9486
9487         if (link_re_init) {
9488                 mutex_lock(&bp->link_lock);
9489                 rc = bnxt_update_phy_setting(bp);
9490                 mutex_unlock(&bp->link_lock);
9491                 if (rc) {
9492                         netdev_warn(bp->dev, "failed to update phy settings\n");
9493                         if (BNXT_SINGLE_PF(bp)) {
9494                                 bp->link_info.phy_retry = true;
9495                                 bp->link_info.phy_retry_expires =
9496                                         jiffies + 5 * HZ;
9497                         }
9498                 }
9499         }
9500
9501         if (irq_re_init)
9502                 udp_tunnel_nic_reset_ntf(bp->dev);
9503
9504         set_bit(BNXT_STATE_OPEN, &bp->state);
9505         bnxt_enable_int(bp);
9506         /* Enable TX queues */
9507         bnxt_tx_enable(bp);
9508         mod_timer(&bp->timer, jiffies + bp->current_interval);
9509         /* Poll link status and check for SFP+ module status */
9510         bnxt_get_port_module_status(bp);
9511
9512         /* VF-reps may need to be re-opened after the PF is re-opened */
9513         if (BNXT_PF(bp))
9514                 bnxt_vf_reps_open(bp);
9515         return 0;
9516
9517 open_err:
9518         bnxt_debug_dev_exit(bp);
9519         bnxt_disable_napi(bp);
9520
9521 open_err_irq:
9522         bnxt_del_napi(bp);
9523
9524 open_err_free_mem:
9525         bnxt_free_skbs(bp);
9526         bnxt_free_irq(bp);
9527         bnxt_free_mem(bp, true);
9528         return rc;
9529 }
9530
9531 /* rtnl_lock held */
9532 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9533 {
9534         int rc = 0;
9535
9536         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9537         if (rc) {
9538                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9539                 dev_close(bp->dev);
9540         }
9541         return rc;
9542 }
9543
9544 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9545  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
9546  * self-tests.
9547  */
9548 int bnxt_half_open_nic(struct bnxt *bp)
9549 {
9550         int rc = 0;
9551
9552         rc = bnxt_alloc_mem(bp, false);
9553         if (rc) {
9554                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9555                 goto half_open_err;
9556         }
9557         rc = bnxt_init_nic(bp, false);
9558         if (rc) {
9559                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9560                 goto half_open_err;
9561         }
9562         return 0;
9563
9564 half_open_err:
9565         bnxt_free_skbs(bp);
9566         bnxt_free_mem(bp, false);
9567         dev_close(bp->dev);
9568         return rc;
9569 }
9570
9571 /* rtnl_lock held, this call can only be made after a previous successful
9572  * call to bnxt_half_open_nic().
9573  */
9574 void bnxt_half_close_nic(struct bnxt *bp)
9575 {
9576         bnxt_hwrm_resource_free(bp, false, false);
9577         bnxt_free_skbs(bp);
9578         bnxt_free_mem(bp, false);
9579 }
9580
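/* Re-apply the SR-IOV hardware configuration for any VFs that were active
 * before a firmware reset (PF only).
 */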
9581 static void bnxt_reenable_sriov(struct bnxt *bp)
9582 {
9583         if (BNXT_PF(bp)) {
9584                 struct bnxt_pf_info *pf = &bp->pf;
9585                 int n = pf->active_vfs;
9586
9587                 if (n)
9588                         bnxt_cfg_hw_sriov(bp, &n, true);
9589         }
9590 }
9591
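/* ndo_open handler.  Notify the firmware of the interface state change,
 * bring the NIC up, and restart ULPs and SR-IOV if a firmware reset was
 * detected while the device was down.
 */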
9592 static int bnxt_open(struct net_device *dev)
9593 {
9594         struct bnxt *bp = netdev_priv(dev);
9595         int rc;
9596
9597         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9598                 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9599                 return -ENODEV;
9600         }
9601
9602         rc = bnxt_hwrm_if_change(bp, true);
9603         if (rc)
9604                 return rc;
9605         rc = __bnxt_open_nic(bp, true, true);
9606         if (rc) {
9607                 bnxt_hwrm_if_change(bp, false);
9608         } else {
9609                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9610                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9611                                 bnxt_ulp_start(bp, 0);
9612                                 bnxt_reenable_sriov(bp);
9613                         }
9614                 }
9615                 bnxt_hwmon_open(bp);
9616         }
9617
9618         return rc;
9619 }
9620
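/* True while another context (sp task or a stats read) is still using the
 * device; __bnxt_close_nic() polls this before tearing anything down.
 */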
9621 static bool bnxt_drv_busy(struct bnxt *bp)
9622 {
9623         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9624                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9625 }
9626
9627 static void bnxt_get_ring_stats(struct bnxt *bp,
9628                                 struct rtnl_link_stats64 *stats);
9629
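/* Counterpart of __bnxt_open_nic().  Stop TX, wait for in-flight users,
 * shut down the rings and free SKBs; IRQs and NAPI are released only when
 * irq_re_init is set.  Ring stats are saved so they survive the close.
 */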
9630 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9631                              bool link_re_init)
9632 {
9633         /* Close the VF-reps before closing PF */
9634         if (BNXT_PF(bp))
9635                 bnxt_vf_reps_close(bp);
9636
9637         /* Change device state to avoid TX queue wake-ups */
9638         bnxt_tx_disable(bp);
9639
9640         clear_bit(BNXT_STATE_OPEN, &bp->state);
9641         smp_mb__after_atomic();
9642         while (bnxt_drv_busy(bp))
9643                 msleep(20);
9644
9645         /* Flush rings and disable interrupts */
9646         bnxt_shutdown_nic(bp, irq_re_init);
9647
9648         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9649
9650         bnxt_debug_dev_exit(bp);
9651         bnxt_disable_napi(bp);
9652         del_timer_sync(&bp->timer);
9653         bnxt_free_skbs(bp);
9654
9655         /* Save ring stats before shutdown */
9656         if (bp->bnapi && irq_re_init)
9657                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9658         if (irq_re_init) {
9659                 bnxt_free_irq(bp);
9660                 bnxt_del_napi(bp);
9661         }
9662         bnxt_free_mem(bp, irq_re_init);
9663 }
9664
9665 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9666 {
9667         int rc = 0;
9668
9669         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9670                 /* If we get here, it means firmware reset is in progress
9671                  * while we are trying to close.  We can safely proceed with
9672                  * the close because we are holding rtnl_lock().  Some firmware
9673                  * messages may fail as we proceed to close.  We set the
9674                  * ABORT_ERR flag here so that the FW reset thread will later
9675                  * abort when it gets the rtnl_lock() and sees the flag.
9676                  */
9677                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9678                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9679         }
9680
9681 #ifdef CONFIG_BNXT_SRIOV
9682         if (bp->sriov_cfg) {
9683                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9684                                                       !bp->sriov_cfg,
9685                                                       BNXT_SRIOV_CFG_WAIT_TMO);
9686                 if (rc)
9687                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9688         }
9689 #endif
9690         __bnxt_close_nic(bp, irq_re_init, link_re_init);
9691         return rc;
9692 }
9693
9694 static int bnxt_close(struct net_device *dev)
9695 {
9696         struct bnxt *bp = netdev_priv(dev);
9697
9698         bnxt_hwmon_close(bp);
9699         bnxt_close_nic(bp, true, true);
9700         bnxt_hwrm_shutdown_link(bp);
9701         bnxt_hwrm_if_change(bp, false);
9702         return 0;
9703 }
9704
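/* Read a PHY register over MDIO through the firmware.  Clause 45
 * addressing is used when phy_addr encodes a C45 device address.
 */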
9705 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9706                                    u16 *val)
9707 {
9708         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9709         struct hwrm_port_phy_mdio_read_input req = {0};
9710         int rc;
9711
9712         if (bp->hwrm_spec_code < 0x10a00)
9713                 return -EOPNOTSUPP;
9714
9715         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9716         req.port_id = cpu_to_le16(bp->pf.port_id);
9717         req.phy_addr = phy_addr;
9718         req.reg_addr = cpu_to_le16(reg & 0x1f);
9719         if (mdio_phy_id_is_c45(phy_addr)) {
9720                 req.cl45_mdio = 1;
9721                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9722                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9723                 req.reg_addr = cpu_to_le16(reg);
9724         }
9725
9726         mutex_lock(&bp->hwrm_cmd_lock);
9727         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9728         if (!rc)
9729                 *val = le16_to_cpu(resp->reg_data);
9730         mutex_unlock(&bp->hwrm_cmd_lock);
9731         return rc;
9732 }
9733
9734 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9735                                     u16 val)
9736 {
9737         struct hwrm_port_phy_mdio_write_input req = {0};
9738
9739         if (bp->hwrm_spec_code < 0x10a00)
9740                 return -EOPNOTSUPP;
9741
9742         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9743         req.port_id = cpu_to_le16(bp->pf.port_id);
9744         req.phy_addr = phy_addr;
9745         req.reg_addr = cpu_to_le16(reg & 0x1f);
9746         if (mdio_phy_id_is_c45(phy_addr)) {
9747                 req.cl45_mdio = 1;
9748                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9749                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9750                 req.reg_addr = cpu_to_le16(reg);
9751         }
9752         req.reg_data = cpu_to_le16(val);
9753
9754         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9755 }
9756
9757 /* rtnl_lock held */
9758 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9759 {
9760         struct mii_ioctl_data *mdio = if_mii(ifr);
9761         struct bnxt *bp = netdev_priv(dev);
9762         int rc;
9763
9764         switch (cmd) {
9765         case SIOCGMIIPHY:
9766                 mdio->phy_id = bp->link_info.phy_addr;
9767
9768                 /* fall through */
9769         case SIOCGMIIREG: {
9770                 u16 mii_regval = 0;
9771
9772                 if (!netif_running(dev))
9773                         return -EAGAIN;
9774
9775                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9776                                              &mii_regval);
9777                 mdio->val_out = mii_regval;
9778                 return rc;
9779         }
9780
9781         case SIOCSMIIREG:
9782                 if (!netif_running(dev))
9783                         return -EAGAIN;
9784
9785                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9786                                                 mdio->val_in);
9787
9788         default:
9789                 /* do nothing */
9790                 break;
9791         }
9792         return -EOPNOTSUPP;
9793 }
9794
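/* Accumulate the per-completion-ring software counters into the standard
 * rtnl_link_stats64 fields.
 */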
9795 static void bnxt_get_ring_stats(struct bnxt *bp,
9796                                 struct rtnl_link_stats64 *stats)
9797 {
9798         int i;
9799
9800         for (i = 0; i < bp->cp_nr_rings; i++) {
9801                 struct bnxt_napi *bnapi = bp->bnapi[i];
9802                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9803                 u64 *sw = cpr->stats.sw_stats;
9804
9805                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
9806                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
9807                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
9808
9809                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
9810                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
9811                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
9812
9813                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
9814                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
9815                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
9816
9817                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
9818                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
9819                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
9820
9821                 stats->rx_missed_errors +=
9822                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
9823
9824                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
9825
9826                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
9827         }
9828 }
9829
9830 static void bnxt_add_prev_stats(struct bnxt *bp,
9831                                 struct rtnl_link_stats64 *stats)
9832 {
9833         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9834
9835         stats->rx_packets += prev_stats->rx_packets;
9836         stats->tx_packets += prev_stats->tx_packets;
9837         stats->rx_bytes += prev_stats->rx_bytes;
9838         stats->tx_bytes += prev_stats->tx_bytes;
9839         stats->rx_missed_errors += prev_stats->rx_missed_errors;
9840         stats->multicast += prev_stats->multicast;
9841         stats->tx_dropped += prev_stats->tx_dropped;
9842 }
9843
9844 static void
9845 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9846 {
9847         struct bnxt *bp = netdev_priv(dev);
9848
9849         set_bit(BNXT_STATE_READ_STATS, &bp->state);
9850         /* Make sure bnxt_close_nic() sees that we are reading stats before
9851          * we check the BNXT_STATE_OPEN flag.
9852          */
9853         smp_mb__after_atomic();
9854         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9855                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9856                 *stats = bp->net_stats_prev;
9857                 return;
9858         }
9859
9860         bnxt_get_ring_stats(bp, stats);
9861         bnxt_add_prev_stats(bp, stats);
9862
9863         if (bp->flags & BNXT_FLAG_PORT_STATS) {
9864                 u64 *rx = bp->port_stats.sw_stats;
9865                 u64 *tx = bp->port_stats.sw_stats +
9866                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9867
9868                 stats->rx_crc_errors =
9869                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
9870                 stats->rx_frame_errors =
9871                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
9872                 stats->rx_length_errors =
9873                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
9874                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
9875                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
9876                 stats->rx_errors =
9877                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
9878                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
9879                 stats->collisions =
9880                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
9881                 stats->tx_fifo_errors =
9882                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
9883                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9884         }
9885         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9886 }
9887
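/* Copy the netdev multicast list into the default VNIC and return true if
 * it changed.  An overflowing list falls back to ALL_MCAST mode.
 */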
9888 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9889 {
9890         struct net_device *dev = bp->dev;
9891         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9892         struct netdev_hw_addr *ha;
9893         u8 *haddr;
9894         int mc_count = 0;
9895         bool update = false;
9896         int off = 0;
9897
9898         netdev_for_each_mc_addr(ha, dev) {
9899                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9900                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9901                         vnic->mc_list_count = 0;
9902                         return false;
9903                 }
9904                 haddr = ha->addr;
9905                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9906                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9907                         update = true;
9908                 }
9909                 off += ETH_ALEN;
9910                 mc_count++;
9911         }
9912         if (mc_count)
9913                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9914
9915         if (mc_count != vnic->mc_list_count) {
9916                 vnic->mc_list_count = mc_count;
9917                 update = true;
9918         }
9919         return update;
9920 }
9921
9922 static bool bnxt_uc_list_updated(struct bnxt *bp)
9923 {
9924         struct net_device *dev = bp->dev;
9925         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9926         struct netdev_hw_addr *ha;
9927         int off = 0;
9928
9929         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9930                 return true;
9931
9932         netdev_for_each_uc_addr(ha, dev) {
9933                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9934                         return true;
9935
9936                 off += ETH_ALEN;
9937         }
9938         return false;
9939 }
9940
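/* ndo_set_rx_mode handler.  Recompute the RX mask from the netdev flags
 * and address lists, then defer the firmware update to the sp task since
 * HWRM calls cannot be issued from this context.
 */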
9941 static void bnxt_set_rx_mode(struct net_device *dev)
9942 {
9943         struct bnxt *bp = netdev_priv(dev);
9944         struct bnxt_vnic_info *vnic;
9945         bool mc_update = false;
9946         bool uc_update;
9947         u32 mask;
9948
9949         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9950                 return;
9951
9952         vnic = &bp->vnic_info[0];
9953         mask = vnic->rx_mask;
9954         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9955                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9956                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9957                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9958
9959         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9960                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9961
9962         uc_update = bnxt_uc_list_updated(bp);
9963
9964         if (dev->flags & IFF_BROADCAST)
9965                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9966         if (dev->flags & IFF_ALLMULTI) {
9967                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9968                 vnic->mc_list_count = 0;
9969         } else {
9970                 mc_update = bnxt_mc_list_updated(bp, &mask);
9971         }
9972
9973         if (mask != vnic->rx_mask || uc_update || mc_update) {
9974                 vnic->rx_mask = mask;
9975
9976                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9977                 bnxt_queue_sp_work(bp);
9978         }
9979 }
9980
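/* Push the unicast filter list and RX mask to the firmware.  Runs from
 * the sp task after bnxt_set_rx_mode() queues BNXT_RX_MASK_SP_EVENT.
 */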
9981 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9982 {
9983         struct net_device *dev = bp->dev;
9984         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9985         struct netdev_hw_addr *ha;
9986         int i, off = 0, rc;
9987         bool uc_update;
9988
9989         netif_addr_lock_bh(dev);
9990         uc_update = bnxt_uc_list_updated(bp);
9991         netif_addr_unlock_bh(dev);
9992
9993         if (!uc_update)
9994                 goto skip_uc;
9995
9996         mutex_lock(&bp->hwrm_cmd_lock);
9997         for (i = 1; i < vnic->uc_filter_count; i++) {
9998                 struct hwrm_cfa_l2_filter_free_input req = {0};
9999
10000                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10001                                        -1);
10002
10003                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10004
10005                 rc = _hwrm_send_message(bp, &req, sizeof(req),
10006                                         HWRM_CMD_TIMEOUT);
10007         }
10008         mutex_unlock(&bp->hwrm_cmd_lock);
10009
10010         vnic->uc_filter_count = 1;
10011
10012         netif_addr_lock_bh(dev);
10013         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10014                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10015         } else {
10016                 netdev_for_each_uc_addr(ha, dev) {
10017                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10018                         off += ETH_ALEN;
10019                         vnic->uc_filter_count++;
10020                 }
10021         }
10022         netif_addr_unlock_bh(dev);
10023
10024         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10025                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10026                 if (rc) {
10027                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10028                                    rc);
10029                         vnic->uc_filter_count = i;
10030                         return rc;
10031                 }
10032         }
10033
10034 skip_uc:
10035         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10036         if (rc && vnic->mc_list_count) {
10037                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10038                             rc);
10039                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10040                 vnic->mc_list_count = 0;
10041                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10042         }
10043         if (rc)
10044                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10045                            rc);
10046
10047         return rc;
10048 }
10049
10050 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10051 {
10052 #ifdef CONFIG_BNXT_SRIOV
10053         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10054                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10055
10056                 /* If no minimum rings were provisioned by the PF, don't
10057                  * reserve rings by default when the device is down.
10058                  */
10059                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10060                         return true;
10061
10062                 if (!netif_running(bp->dev))
10063                         return false;
10064         }
10065 #endif
10066         return true;
10067 }
10068
10069 /* If the chip and firmware support RFS */
10070 static bool bnxt_rfs_supported(struct bnxt *bp)
10071 {
10072         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10073                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10074                         return true;
10075                 return false;
10076         }
10077         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10078                 return true;
10079         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10080                 return true;
10081         return false;
10082 }
10083
10084 /* If runtime conditions support RFS */
10085 static bool bnxt_rfs_capable(struct bnxt *bp)
10086 {
10087 #ifdef CONFIG_RFS_ACCEL
10088         int vnics, max_vnics, max_rss_ctxs;
10089
10090         if (bp->flags & BNXT_FLAG_CHIP_P5)
10091                 return bnxt_rfs_supported(bp);
10092         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10093                 return false;
10094
10095         vnics = 1 + bp->rx_nr_rings;
10096         max_vnics = bnxt_get_max_func_vnics(bp);
10097         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10098
10099         /* RSS contexts not a limiting factor */
10100         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10101                 max_rss_ctxs = max_vnics;
10102         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10103                 if (bp->rx_nr_rings > 1)
10104                         netdev_warn(bp->dev,
10105                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10106                                     min(max_rss_ctxs - 1, max_vnics - 1));
10107                 return false;
10108         }
10109
10110         if (!BNXT_NEW_RM(bp))
10111                 return true;
10112
10113         if (vnics == bp->hw_resc.resv_vnics)
10114                 return true;
10115
10116         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10117         if (vnics <= bp->hw_resc.resv_vnics)
10118                 return true;
10119
10120         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10121         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10122         return false;
10123 #else
10124         return false;
10125 #endif
10126 }
10127
10128 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10129                                            netdev_features_t features)
10130 {
10131         struct bnxt *bp = netdev_priv(dev);
10132         netdev_features_t vlan_features;
10133
10134         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10135                 features &= ~NETIF_F_NTUPLE;
10136
10137         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10138                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10139
10140         if (!(features & NETIF_F_GRO))
10141                 features &= ~NETIF_F_GRO_HW;
10142
10143         if (features & NETIF_F_GRO_HW)
10144                 features &= ~NETIF_F_LRO;
10145
10146         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10147          * turned on or off together.
10148          */
10149         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10150         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10151                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10152                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10153                 else if (vlan_features)
10154                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10155         }
10156 #ifdef CONFIG_BNXT_SRIOV
10157         if (BNXT_VF(bp) && bp->vf.vlan)
10158                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10159 #endif
10160         return features;
10161 }
10162
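/* ndo_set_features handler.  A TPA-only change can be applied on the fly;
 * any other flag change requires a close/open cycle of the NIC.
 */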
10163 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10164 {
10165         struct bnxt *bp = netdev_priv(dev);
10166         u32 flags = bp->flags;
10167         u32 changes;
10168         int rc = 0;
10169         bool re_init = false;
10170         bool update_tpa = false;
10171
10172         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10173         if (features & NETIF_F_GRO_HW)
10174                 flags |= BNXT_FLAG_GRO;
10175         else if (features & NETIF_F_LRO)
10176                 flags |= BNXT_FLAG_LRO;
10177
10178         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10179                 flags &= ~BNXT_FLAG_TPA;
10180
10181         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10182                 flags |= BNXT_FLAG_STRIP_VLAN;
10183
10184         if (features & NETIF_F_NTUPLE)
10185                 flags |= BNXT_FLAG_RFS;
10186
10187         changes = flags ^ bp->flags;
10188         if (changes & BNXT_FLAG_TPA) {
10189                 update_tpa = true;
10190                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10191                     (flags & BNXT_FLAG_TPA) == 0 ||
10192                     (bp->flags & BNXT_FLAG_CHIP_P5))
10193                         re_init = true;
10194         }
10195
10196         if (changes & ~BNXT_FLAG_TPA)
10197                 re_init = true;
10198
10199         if (flags != bp->flags) {
10200                 u32 old_flags = bp->flags;
10201
10202                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10203                         bp->flags = flags;
10204                         if (update_tpa)
10205                                 bnxt_set_ring_params(bp);
10206                         return rc;
10207                 }
10208
10209                 if (re_init) {
10210                         bnxt_close_nic(bp, false, false);
10211                         bp->flags = flags;
10212                         if (update_tpa)
10213                                 bnxt_set_ring_params(bp);
10214
10215                         return bnxt_open_nic(bp, false, false);
10216                 }
10217                 if (update_tpa) {
10218                         bp->flags = flags;
10219                         rc = bnxt_set_tpa(bp,
10220                                           (flags & BNXT_FLAG_TPA) ?
10221                                           true : false);
10222                         if (rc)
10223                                 bp->flags = old_flags;
10224                 }
10225         }
10226         return rc;
10227 }
10228
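/* Read num_words 32-bit registers starting at reg_off through the
 * firmware debug interface, using a temporary DMA buffer.
 */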
10229 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10230                          u32 *reg_buf)
10231 {
10232         struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10233         struct hwrm_dbg_read_direct_input req = {0};
10234         __le32 *dbg_reg_buf;
10235         dma_addr_t mapping;
10236         int rc, i;
10237
10238         dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10239                                          &mapping, GFP_KERNEL);
10240         if (!dbg_reg_buf)
10241                 return -ENOMEM;
10242         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10243         req.host_dest_addr = cpu_to_le64(mapping);
10244         req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10245         req.read_len32 = cpu_to_le32(num_words);
10246         mutex_lock(&bp->hwrm_cmd_lock);
10247         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10248         if (rc || resp->error_code) {
10249                 rc = -EIO;
10250                 goto dbg_rd_reg_exit;
10251         }
10252         for (i = 0; i < num_words; i++)
10253                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10254
10255 dbg_rd_reg_exit:
10256         mutex_unlock(&bp->hwrm_cmd_lock);
10257         dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10258         return rc;
10259 }
10260
10261 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10262                                        u32 ring_id, u32 *prod, u32 *cons)
10263 {
10264         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10265         struct hwrm_dbg_ring_info_get_input req = {0};
10266         int rc;
10267
10268         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10269         req.ring_type = ring_type;
10270         req.fw_ring_id = cpu_to_le32(ring_id);
10271         mutex_lock(&bp->hwrm_cmd_lock);
10272         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10273         if (!rc) {
10274                 *prod = le32_to_cpu(resp->producer_index);
10275                 *cons = le32_to_cpu(resp->consumer_index);
10276         }
10277         mutex_unlock(&bp->hwrm_cmd_lock);
10278         return rc;
10279 }
10280
10281 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10282 {
10283         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10284         int i = bnapi->index;
10285
10286         if (!txr)
10287                 return;
10288
10289         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10290                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10291                     txr->tx_cons);
10292 }
10293
10294 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10295 {
10296         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10297         int i = bnapi->index;
10298
10299         if (!rxr)
10300                 return;
10301
10302         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10303                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10304                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10305                     rxr->rx_sw_agg_prod);
10306 }
10307
10308 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10309 {
10310         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10311         int i = bnapi->index;
10312
10313         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10314                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10315 }
10316
10317 static void bnxt_dbg_dump_states(struct bnxt *bp)
10318 {
10319         int i;
10320         struct bnxt_napi *bnapi;
10321
10322         for (i = 0; i < bp->cp_nr_rings; i++) {
10323                 bnapi = bp->bnapi[i];
10324                 if (netif_msg_drv(bp)) {
10325                         bnxt_dump_tx_sw_state(bnapi);
10326                         bnxt_dump_rx_sw_state(bnapi);
10327                         bnxt_dump_cp_sw_state(bnapi);
10328                 }
10329         }
10330 }
10331
10332 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10333 {
10334         if (!silent)
10335                 bnxt_dbg_dump_states(bp);
10336         if (netif_running(bp->dev)) {
10337                 int rc;
10338
10339                 if (silent) {
10340                         bnxt_close_nic(bp, false, false);
10341                         bnxt_open_nic(bp, false, false);
10342                 } else {
10343                         bnxt_ulp_stop(bp);
10344                         bnxt_close_nic(bp, true, false);
10345                         rc = bnxt_open_nic(bp, true, false);
10346                         bnxt_ulp_start(bp, rc);
10347                 }
10348         }
10349 }
10350
10351 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
10352 {
10353         struct bnxt *bp = netdev_priv(dev);
10354
10355         netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
10356         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10357         bnxt_queue_sp_work(bp);
10358 }
10359
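/* Periodic firmware health check driven from bnxt_timer().  A stalled
 * heartbeat or an unexpected change of the reset counter schedules the
 * firmware exception handler.
 */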
10360 static void bnxt_fw_health_check(struct bnxt *bp)
10361 {
10362         struct bnxt_fw_health *fw_health = bp->fw_health;
10363         u32 val;
10364
10365         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10366                 return;
10367
10368         if (fw_health->tmr_counter) {
10369                 fw_health->tmr_counter--;
10370                 return;
10371         }
10372
10373         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10374         if (val == fw_health->last_fw_heartbeat)
10375                 goto fw_reset;
10376
10377         fw_health->last_fw_heartbeat = val;
10378
10379         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10380         if (val != fw_health->last_fw_reset_cnt)
10381                 goto fw_reset;
10382
10383         fw_health->tmr_counter = fw_health->tmr_multiplier;
10384         return;
10385
10386 fw_reset:
10387         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10388         bnxt_queue_sp_work(bp);
10389 }
10390
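/* Per-device periodic timer.  Schedules stats updates, PHY setting
 * retries, flow stats and health checks on the sp workqueue, then
 * re-arms itself.
 */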
10391 static void bnxt_timer(struct timer_list *t)
10392 {
10393         struct bnxt *bp = from_timer(bp, t, timer);
10394         struct net_device *dev = bp->dev;
10395
10396         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
10397                 return;
10398
10399         if (atomic_read(&bp->intr_sem) != 0)
10400                 goto bnxt_restart_timer;
10401
10402         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10403                 bnxt_fw_health_check(bp);
10404
10405         if (bp->link_info.link_up && bp->stats_coal_ticks) {
10406                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10407                 bnxt_queue_sp_work(bp);
10408         }
10409
10410         if (bnxt_tc_flower_enabled(bp)) {
10411                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10412                 bnxt_queue_sp_work(bp);
10413         }
10414
10415 #ifdef CONFIG_RFS_ACCEL
10416         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10417                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10418                 bnxt_queue_sp_work(bp);
10419         }
10420 #endif /*CONFIG_RFS_ACCEL*/
10421
10422         if (bp->link_info.phy_retry) {
10423                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10424                         bp->link_info.phy_retry = false;
10425                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10426                 } else {
10427                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10428                         bnxt_queue_sp_work(bp);
10429                 }
10430         }
10431
10432         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10433             netif_carrier_ok(dev)) {
10434                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10435                 bnxt_queue_sp_work(bp);
10436         }
10437 bnxt_restart_timer:
10438         mod_timer(&bp->timer, jiffies + bp->current_interval);
10439 }
10440
10441 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10442 {
10443         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10444          * set.  If the device is being closed, bnxt_close() may be holding
10445          * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
10446          * must clear BNXT_STATE_IN_SP_TASK before holding rtnl_lock().
10447          */
10448         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10449         rtnl_lock();
10450 }
10451
10452 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10453 {
10454         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10455         rtnl_unlock();
10456 }
10457
10458 /* Only called from bnxt_sp_task() */
10459 static void bnxt_reset(struct bnxt *bp, bool silent)
10460 {
10461         bnxt_rtnl_lock_sp(bp);
10462         if (test_bit(BNXT_STATE_OPEN, &bp->state))
10463                 bnxt_reset_task(bp, silent);
10464         bnxt_rtnl_unlock_sp(bp);
10465 }
10466
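/* Tear the NIC down in preparation for a firmware reset: stop ULPs, close
 * the device, disable interrupts and release firmware-backed context
 * memory.
 */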
10467 static void bnxt_fw_reset_close(struct bnxt *bp)
10468 {
10469         bnxt_ulp_stop(bp);
10470         /* When firmware is in a fatal state, disable the PCI device to prevent
10471          * any potential bad DMAs before freeing kernel memory.
10472          */
10473         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10474                 pci_disable_device(bp->pdev);
10475         __bnxt_close_nic(bp, true, false);
10476         bnxt_clear_int_mode(bp);
10477         bnxt_hwrm_func_drv_unrgtr(bp);
10478         if (pci_is_enabled(bp->pdev))
10479                 pci_disable_device(bp->pdev);
10480         bnxt_free_ctx_mem(bp);
10481         kfree(bp->ctx);
10482         bp->ctx = NULL;
10483 }
10484
10485 static bool is_bnxt_fw_ok(struct bnxt *bp)
10486 {
10487         struct bnxt_fw_health *fw_health = bp->fw_health;
10488         bool no_heartbeat = false, has_reset = false;
10489         u32 val;
10490
10491         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10492         if (val == fw_health->last_fw_heartbeat)
10493                 no_heartbeat = true;
10494
10495         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10496         if (val != fw_health->last_fw_reset_cnt)
10497                 has_reset = true;
10498
10499         if (!no_heartbeat && has_reset)
10500                 return true;
10501
10502         return false;
10503 }
10504
10505 /* rtnl_lock is acquired before calling this function */
10506 static void bnxt_force_fw_reset(struct bnxt *bp)
10507 {
10508         struct bnxt_fw_health *fw_health = bp->fw_health;
10509         u32 wait_dsecs;
10510
10511         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10512             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10513                 return;
10514
10515         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10516         bnxt_fw_reset_close(bp);
10517         wait_dsecs = fw_health->master_func_wait_dsecs;
10518         if (fw_health->master) {
10519                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10520                         wait_dsecs = 0;
10521                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10522         } else {
10523                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10524                 wait_dsecs = fw_health->normal_func_wait_dsecs;
10525                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10526         }
10527
10528         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10529         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10530         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10531 }
10532
10533 void bnxt_fw_exception(struct bnxt *bp)
10534 {
10535         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10536         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10537         bnxt_rtnl_lock_sp(bp);
10538         bnxt_force_fw_reset(bp);
10539         bnxt_rtnl_unlock_sp(bp);
10540 }
10541
10542 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10543  * < 0 on error.
10544  */
10545 static int bnxt_get_registered_vfs(struct bnxt *bp)
10546 {
10547 #ifdef CONFIG_BNXT_SRIOV
10548         int rc;
10549
10550         if (!BNXT_PF(bp))
10551                 return 0;
10552
10553         rc = bnxt_hwrm_func_qcfg(bp);
10554         if (rc) {
10555                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10556                 return rc;
10557         }
10558         if (bp->pf.registered_vfs)
10559                 return bp->pf.registered_vfs;
10560         if (bp->sriov_cfg)
10561                 return 1;
10562 #endif
10563         return 0;
10564 }
10565
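/* Initiate a coordinated (non-fatal) firmware reset.  If VFs are still
 * registered, poll for them to unregister first; otherwise close the NIC
 * and start the firmware reset state machine.
 */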
10566 void bnxt_fw_reset(struct bnxt *bp)
10567 {
10568         bnxt_rtnl_lock_sp(bp);
10569         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10570             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10571                 int n = 0, tmo;
10572
10573                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10574                 if (bp->pf.active_vfs &&
10575                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10576                         n = bnxt_get_registered_vfs(bp);
10577                 if (n < 0) {
10578                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10579                                    n);
10580                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10581                         dev_close(bp->dev);
10582                         goto fw_reset_exit;
10583                 } else if (n > 0) {
10584                         u16 vf_tmo_dsecs = n * 10;
10585
10586                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10587                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10588                         bp->fw_reset_state =
10589                                 BNXT_FW_RESET_STATE_POLL_VF;
10590                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10591                         goto fw_reset_exit;
10592                 }
10593                 bnxt_fw_reset_close(bp);
10594                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10595                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10596                         tmo = HZ / 10;
10597                 } else {
10598                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10599                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10600                 }
10601                 bnxt_queue_fw_reset_work(bp, tmo);
10602         }
10603 fw_reset_exit:
10604         bnxt_rtnl_unlock_sp(bp);
10605 }
10606
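/* P5 chips only: look for completion rings that have pending work but
 * whose consumer index has not advanced, indicating a missed interrupt.
 */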
10607 static void bnxt_chk_missed_irq(struct bnxt *bp)
10608 {
10609         int i;
10610
10611         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10612                 return;
10613
10614         for (i = 0; i < bp->cp_nr_rings; i++) {
10615                 struct bnxt_napi *bnapi = bp->bnapi[i];
10616                 struct bnxt_cp_ring_info *cpr;
10617                 u32 fw_ring_id;
10618                 int j;
10619
10620                 if (!bnapi)
10621                         continue;
10622
10623                 cpr = &bnapi->cp_ring;
10624                 for (j = 0; j < 2; j++) {
10625                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10626                         u32 val[2];
10627
10628                         if (!cpr2 || cpr2->has_more_work ||
10629                             !bnxt_has_work(bp, cpr2))
10630                                 continue;
10631
10632                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10633                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10634                                 continue;
10635                         }
10636                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10637                         bnxt_dbg_hwrm_ring_info_get(bp,
10638                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10639                                 fw_ring_id, &val[0], &val[1]);
10640                         cpr->sw_stats.cmn.missed_irqs++;
10641                 }
10642         }
10643 }
10644
10645 static void bnxt_cfg_ntp_filters(struct bnxt *);
10646
10647 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10648 {
10649         struct bnxt_link_info *link_info = &bp->link_info;
10650
10651         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10652                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10653                 if (bp->hwrm_spec_code >= 0x10201) {
10654                         if (link_info->auto_pause_setting &
10655                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10656                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10657                 } else {
10658                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10659                 }
10660                 link_info->advertising = link_info->auto_link_speeds;
10661         } else {
10662                 link_info->req_link_speed = link_info->force_link_speed;
10663                 link_info->req_duplex = link_info->duplex_setting;
10664         }
10665         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10666                 link_info->req_flow_ctrl =
10667                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10668         else
10669                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10670 }
10671
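/* Slow path work handler.  Handles every BNXT_*_SP_EVENT bit queued by
 * the fast path or the timer; the reset handlers must run last because
 * they clear BNXT_STATE_IN_SP_TASK themselves.
 */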
10672 static void bnxt_sp_task(struct work_struct *work)
10673 {
10674         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10675
10676         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10677         smp_mb__after_atomic();
10678         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10679                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10680                 return;
10681         }
10682
10683         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10684                 bnxt_cfg_rx_mode(bp);
10685
10686         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10687                 bnxt_cfg_ntp_filters(bp);
10688         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10689                 bnxt_hwrm_exec_fwd_req(bp);
10690         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10691                 bnxt_hwrm_port_qstats(bp, 0);
10692                 bnxt_hwrm_port_qstats_ext(bp, 0);
10693                 bnxt_accumulate_all_stats(bp);
10694         }
10695
10696         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10697                 int rc;
10698
10699                 mutex_lock(&bp->link_lock);
10700                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10701                                        &bp->sp_event))
10702                         bnxt_hwrm_phy_qcaps(bp);
10703
10704                 rc = bnxt_update_link(bp, true);
10705                 if (rc)
10706                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10707                                    rc);
10708
10709                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
10710                                        &bp->sp_event))
10711                         bnxt_init_ethtool_link_settings(bp);
10712                 mutex_unlock(&bp->link_lock);
10713         }
10714         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10715                 int rc;
10716
10717                 mutex_lock(&bp->link_lock);
10718                 rc = bnxt_update_phy_setting(bp);
10719                 mutex_unlock(&bp->link_lock);
10720                 if (rc) {
10721                         netdev_warn(bp->dev, "update phy settings retry failed\n");
10722                 } else {
10723                         bp->link_info.phy_retry = false;
10724                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
10725                 }
10726         }
10727         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10728                 mutex_lock(&bp->link_lock);
10729                 bnxt_get_port_module_status(bp);
10730                 mutex_unlock(&bp->link_lock);
10731         }
10732
10733         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10734                 bnxt_tc_flow_stats_work(bp);
10735
10736         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10737                 bnxt_chk_missed_irq(bp);
10738
10739         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
10740          * must be the last functions to be called before exiting.
10741          */
10742         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10743                 bnxt_reset(bp, false);
10744
10745         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10746                 bnxt_reset(bp, true);
10747
10748         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10749                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10750
10751         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10752                 if (!is_bnxt_fw_ok(bp))
10753                         bnxt_devlink_health_report(bp,
10754                                                    BNXT_FW_EXCEPTION_SP_EVENT);
10755         }
10756
10757         smp_mb__before_atomic();
10758         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10759 }
10760
10761 /* Under rtnl_lock */
10762 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10763                      int tx_xdp)
10764 {
10765         int max_rx, max_tx, tx_sets = 1;
10766         int tx_rings_needed, stats;
10767         int rx_rings = rx;
10768         int cp, vnics, rc;
10769
10770         if (tcs)
10771                 tx_sets = tcs;
10772
10773         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10774         if (rc)
10775                 return rc;
10776
10777         if (max_rx < rx)
10778                 return -ENOMEM;
10779
10780         tx_rings_needed = tx * tx_sets + tx_xdp;
10781         if (max_tx < tx_rings_needed)
10782                 return -ENOMEM;
10783
10784         vnics = 1;
10785         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10786                 vnics += rx_rings;
10787
10788         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10789                 rx_rings <<= 1;
10790         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
10791         stats = cp;
10792         if (BNXT_NEW_RM(bp)) {
10793                 cp += bnxt_get_ulp_msix_num(bp);
10794                 stats += bnxt_get_ulp_stat_ctxs(bp);
10795         }
10796         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10797                                      stats, vnics);
10798 }
10799
10800 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10801 {
10802         if (bp->bar2) {
10803                 pci_iounmap(pdev, bp->bar2);
10804                 bp->bar2 = NULL;
10805         }
10806
10807         if (bp->bar1) {
10808                 pci_iounmap(pdev, bp->bar1);
10809                 bp->bar1 = NULL;
10810         }
10811
10812         if (bp->bar0) {
10813                 pci_iounmap(pdev, bp->bar0);
10814                 bp->bar0 = NULL;
10815         }
10816 }
10817
10818 static void bnxt_cleanup_pci(struct bnxt *bp)
10819 {
10820         bnxt_unmap_bars(bp, bp->pdev);
10821         pci_release_regions(bp->pdev);
10822         if (pci_is_enabled(bp->pdev))
10823                 pci_disable_device(bp->pdev);
10824 }
10825
10826 static void bnxt_init_dflt_coal(struct bnxt *bp)
10827 {
10828         struct bnxt_coal *coal;
10829
10830         /* Tick values in microseconds.
10831          * 1 coal_buf x bufs_per_record = 1 completion record.
10832          */
10833         coal = &bp->rx_coal;
10834         coal->coal_ticks = 10;
10835         coal->coal_bufs = 30;
10836         coal->coal_ticks_irq = 1;
10837         coal->coal_bufs_irq = 2;
10838         coal->idle_thresh = 50;
10839         coal->bufs_per_record = 2;
10840         coal->budget = 64;              /* NAPI budget */
10841
10842         coal = &bp->tx_coal;
10843         coal->coal_ticks = 28;
10844         coal->coal_bufs = 30;
10845         coal->coal_ticks_irq = 2;
10846         coal->coal_bufs_irq = 2;
10847         coal->bufs_per_record = 1;
10848
10849         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10850 }
10851
10852 static void bnxt_alloc_fw_health(struct bnxt *bp)
10853 {
10854         if (bp->fw_health)
10855                 return;
10856
10857         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10858             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10859                 return;
10860
10861         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10862         if (!bp->fw_health) {
10863                 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10864                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10865                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10866         }
10867 }
10868
10869 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10870 {
10871         int rc;
10872
10873         bp->fw_cap = 0;
10874         rc = bnxt_hwrm_ver_get(bp);
10875         if (rc)
10876                 return rc;
10877
10878         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10879                 rc = bnxt_alloc_kong_hwrm_resources(bp);
10880                 if (rc)
10881                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10882         }
10883
10884         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10885             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10886                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10887                 if (rc)
10888                         return rc;
10889         }
10890         rc = bnxt_hwrm_func_reset(bp);
10891         if (rc)
10892                 return -ENODEV;
10893
10894         bnxt_hwrm_fw_set_time(bp);
10895         return 0;
10896 }
10897
10898 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10899 {
10900         int rc;
10901
10902         /* Get the MAX capabilities for this function */
10903         rc = bnxt_hwrm_func_qcaps(bp);
10904         if (rc) {
10905                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10906                            rc);
10907                 return -ENODEV;
10908         }
10909
10910         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10911         if (rc)
10912                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10913                             rc);
10914
10915         bnxt_alloc_fw_health(bp);
10916         rc = bnxt_hwrm_error_recovery_qcfg(bp);
10917         if (rc)
10918                 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10919                             rc);
10920
10921         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
10922         if (rc)
10923                 return -ENODEV;
10924
10925         bnxt_hwrm_func_qcfg(bp);
10926         bnxt_hwrm_vnic_qcaps(bp);
10927         bnxt_hwrm_port_led_qcaps(bp);
10928         bnxt_ethtool_init(bp);
10929         bnxt_dcb_init(bp);
10930         return 0;
10931 }
10932
10933 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10934 {
10935         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10936         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10937                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10938                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10939                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10940         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
10941                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10942                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10943                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10944         }
10945 }
10946
10947 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10948 {
10949         struct net_device *dev = bp->dev;
10950
10951         dev->hw_features &= ~NETIF_F_NTUPLE;
10952         dev->features &= ~NETIF_F_NTUPLE;
10953         bp->flags &= ~BNXT_FLAG_RFS;
10954         if (bnxt_rfs_supported(bp)) {
10955                 dev->hw_features |= NETIF_F_NTUPLE;
10956                 if (bnxt_rfs_capable(bp)) {
10957                         bp->flags |= BNXT_FLAG_RFS;
10958                         dev->features |= NETIF_F_NTUPLE;
10959                 }
10960         }
10961 }
10962
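      /* Phase 3 of firmware init: apply default settings that depend on the
       * capabilities discovered in phase 2 (RSS hash types, RFS/NTUPLE
       * support, Wake-on-LAN, cache line size and the coalescing parameter
       * ranges).
       */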
10963 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10964 {
10965         struct pci_dev *pdev = bp->pdev;
10966
10967         bnxt_set_dflt_rss_hash_type(bp);
10968         bnxt_set_dflt_rfs(bp);
10969
10970         bnxt_get_wol_settings(bp);
10971         if (bp->flags & BNXT_FLAG_WOL_CAP)
10972                 device_set_wakeup_enable(&pdev->dev, bp->wol);
10973         else
10974                 device_set_wakeup_capable(&pdev->dev, false);
10975
10976         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10977         bnxt_hwrm_coal_params_qcaps(bp);
10978 }
10979
10980 static int bnxt_fw_init_one(struct bnxt *bp)
10981 {
10982         int rc;
10983
10984         rc = bnxt_fw_init_one_p1(bp);
10985         if (rc) {
10986                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10987                 return rc;
10988         }
10989         rc = bnxt_fw_init_one_p2(bp);
10990         if (rc) {
10991                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10992                 return rc;
10993         }
10994         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10995         if (rc)
10996                 return rc;
10997
10998         /* In case fw capabilities have changed, destroy the unneeded
10999          * reporters and create newly capable ones.
11000          */
11001         bnxt_dl_fw_reporters_destroy(bp, false);
11002         bnxt_dl_fw_reporters_create(bp);
11003         bnxt_fw_init_one_p3(bp);
11004         return 0;
11005 }
11006
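      /* Execute one step of the firmware-provided reset sequence.  Each step
       * is a (register, value, delay) triple taken from the error recovery
       * config; the register encoding selects PCI config space, a GRC
       * register (written through the BAR0 window), BAR0 or BAR1 as the
       * destination of the write.
       */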
11007 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11008 {
11009         struct bnxt_fw_health *fw_health = bp->fw_health;
11010         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11011         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11012         u32 reg_type, reg_off, delay_msecs;
11013
11014         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11015         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11016         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11017         switch (reg_type) {
11018         case BNXT_FW_HEALTH_REG_TYPE_CFG:
11019                 pci_write_config_dword(bp->pdev, reg_off, val);
11020                 break;
11021         case BNXT_FW_HEALTH_REG_TYPE_GRC:
11022                 writel(reg_off & BNXT_GRC_BASE_MASK,
11023                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11024                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11025                 /* fall through */
11026         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11027                 writel(val, bp->bar0 + reg_off);
11028                 break;
11029         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11030                 writel(val, bp->bar1 + reg_off);
11031                 break;
11032         }
11033         if (delay_msecs) {
11034                 pci_read_config_dword(bp->pdev, 0, &val);
11035                 msleep(delay_msecs);
11036         }
11037 }
11038
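      /* Reset the whole device using whichever mechanism the firmware
       * advertises: reload the firmware through the OP-TEE service
       * (CONFIG_TEE_BNXT_FW), have the host drive the register write
       * sequence, or ask the co-processor to reset itself via HWRM_FW_RESET.
       */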
11039 static void bnxt_reset_all(struct bnxt *bp)
11040 {
11041         struct bnxt_fw_health *fw_health = bp->fw_health;
11042         int i, rc;
11043
11044         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11045 #ifdef CONFIG_TEE_BNXT_FW
11046                 rc = tee_bnxt_fw_load();
11047                 if (rc)
11048                         netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
11049                 bp->fw_reset_timestamp = jiffies;
11050 #endif
11051                 return;
11052         }
11053
11054         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11055                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11056                         bnxt_fw_reset_writel(bp, i);
11057         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11058                 struct hwrm_fw_reset_input req = {0};
11059
11060                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11061                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11062                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11063                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11064                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11065                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11066                 if (rc)
11067                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11068         }
11069         bp->fw_reset_timestamp = jiffies;
11070 }
11071
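      /* Firmware reset state machine, run from a delayed work item.
       * Depending on the recovery mode it steps through POLL_VF (wait for
       * VFs to unregister), POLL_FW_DOWN (wait for firmware to report
       * shutdown), RESET_FW, ENABLE_DEV (re-enable the PCI device), POLL_FW
       * (wait for firmware to come back) and finally OPENING (reopen the
       * netdev).  The fw_reset_{min,max}_dsecs values are in deciseconds
       * (e.g. 20 is 2 seconds), hence the "* HZ / 10" conversions below.
       */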
11072 static void bnxt_fw_reset_task(struct work_struct *work)
11073 {
11074         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11075         int rc;
11076
11077         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11078                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11079                 return;
11080         }
11081
11082         switch (bp->fw_reset_state) {
11083         case BNXT_FW_RESET_STATE_POLL_VF: {
11084                 int n = bnxt_get_registered_vfs(bp);
11085                 int tmo;
11086
11087                 if (n < 0) {
11088                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11089                                    n, jiffies_to_msecs(jiffies -
11090                                    bp->fw_reset_timestamp));
11091                         goto fw_reset_abort;
11092                 } else if (n > 0) {
11093                         if (time_after(jiffies, bp->fw_reset_timestamp +
11094                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
11095                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11096                                 bp->fw_reset_state = 0;
11097                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11098                                            n);
11099                                 return;
11100                         }
11101                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11102                         return;
11103                 }
11104                 bp->fw_reset_timestamp = jiffies;
11105                 rtnl_lock();
11106                 bnxt_fw_reset_close(bp);
11107                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11108                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11109                         tmo = HZ / 10;
11110                 } else {
11111                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11112                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11113                 }
11114                 rtnl_unlock();
11115                 bnxt_queue_fw_reset_work(bp, tmo);
11116                 return;
11117         }
11118         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
11119                 u32 val;
11120
11121                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11122                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
11123                     !time_after(jiffies, bp->fw_reset_timestamp +
11124                     (bp->fw_reset_max_dsecs * HZ / 10))) {
11125                         bnxt_queue_fw_reset_work(bp, HZ / 5);
11126                         return;
11127                 }
11128
11129                 if (!bp->fw_health->master) {
11130                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11131
11132                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11133                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11134                         return;
11135                 }
11136                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11137         }
11138         /* fall through */
11139         case BNXT_FW_RESET_STATE_RESET_FW:
11140                 bnxt_reset_all(bp);
11141                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11142                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11143                 return;
11144         case BNXT_FW_RESET_STATE_ENABLE_DEV:
11145                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11146                         u32 val;
11147
11148                         val = bnxt_fw_health_readl(bp,
11149                                                    BNXT_FW_RESET_INPROG_REG);
11150                         if (val)
11151                                 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
11152                                             val);
11153                 }
11154                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11155                 if (pci_enable_device(bp->pdev)) {
11156                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11157                         goto fw_reset_abort;
11158                 }
11159                 pci_set_master(bp->pdev);
11160                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11161                 /* fall through */
11162         case BNXT_FW_RESET_STATE_POLL_FW:
11163                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11164                 rc = __bnxt_hwrm_ver_get(bp, true);
11165                 if (rc) {
11166                         if (time_after(jiffies, bp->fw_reset_timestamp +
11167                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
11168                                 netdev_err(bp->dev, "Firmware reset aborted\n");
11169                                 goto fw_reset_abort;
11170                         }
11171                         bnxt_queue_fw_reset_work(bp, HZ / 5);
11172                         return;
11173                 }
11174                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11175                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11176                 /* fall through */
11177         case BNXT_FW_RESET_STATE_OPENING:
11178                 while (!rtnl_trylock()) {
11179                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11180                         return;
11181                 }
11182                 rc = bnxt_open(bp->dev);
11183                 if (rc) {
11184                         netdev_err(bp->dev, "bnxt_open() failed\n");
11185                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11186                         dev_close(bp->dev);
11187                 }
11188
11189                 bp->fw_reset_state = 0;
11190                 /* Make sure fw_reset_state is 0 before clearing the flag */
11191                 smp_mb__before_atomic();
11192                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11193                 bnxt_ulp_start(bp, rc);
11194                 if (!rc)
11195                         bnxt_reenable_sriov(bp);
11196                 bnxt_dl_health_recovery_done(bp);
11197                 bnxt_dl_health_status_update(bp, true);
11198                 rtnl_unlock();
11199                 break;
11200         }
11201         return;
11202
11203 fw_reset_abort:
11204         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11205         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11206                 bnxt_dl_health_status_update(bp, false);
11207         bp->fw_reset_state = 0;
11208         rtnl_lock();
11209         dev_close(bp->dev);
11210         rtnl_unlock();
11211 }
11212
11213 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
11214 {
11215         int rc;
11216         struct bnxt *bp = netdev_priv(dev);
11217
11218         SET_NETDEV_DEV(dev, &pdev->dev);
11219
11220         /* enable device (incl. PCI PM wakeup), and bus-mastering */
11221         rc = pci_enable_device(pdev);
11222         if (rc) {
11223                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
11224                 goto init_err;
11225         }
11226
11227         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11228                 dev_err(&pdev->dev,
11229                         "Cannot find PCI device base address, aborting\n");
11230                 rc = -ENODEV;
11231                 goto init_err_disable;
11232         }
11233
11234         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11235         if (rc) {
11236                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
11237                 goto init_err_disable;
11238         }
11239
11240         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
11241             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11242                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                      rc = -EIO;
11243                 goto init_err_disable;
11244         }
11245
11246         pci_set_master(pdev);
11247
11248         bp->dev = dev;
11249         bp->pdev = pdev;
11250
11251         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
11252          * determines the BAR size.
11253          */
11254         bp->bar0 = pci_ioremap_bar(pdev, 0);
11255         if (!bp->bar0) {
11256                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
11257                 rc = -ENOMEM;
11258                 goto init_err_release;
11259         }
11260
11261         bp->bar2 = pci_ioremap_bar(pdev, 4);
11262         if (!bp->bar2) {
11263                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
11264                 rc = -ENOMEM;
11265                 goto init_err_release;
11266         }
11267
11268         pci_enable_pcie_error_reporting(pdev);
11269
11270         INIT_WORK(&bp->sp_task, bnxt_sp_task);
11271         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
11272
11273         spin_lock_init(&bp->ntp_fltr_lock);
11274 #if BITS_PER_LONG == 32
11275         spin_lock_init(&bp->db_lock);
11276 #endif
11277
11278         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
11279         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
11280
11281         bnxt_init_dflt_coal(bp);
11282
11283         timer_setup(&bp->timer, bnxt_timer, 0);
11284         bp->current_interval = BNXT_TIMER_INTERVAL;
11285
11286         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
11287         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
11288
11289         clear_bit(BNXT_STATE_OPEN, &bp->state);
11290         return 0;
11291
11292 init_err_release:
11293         bnxt_unmap_bars(bp, pdev);
11294         pci_release_regions(pdev);
11295
11296 init_err_disable:
11297         pci_disable_device(pdev);
11298
11299 init_err:
11300         return rc;
11301 }
11302
11303 /* rtnl_lock held */
11304 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
11305 {
11306         struct sockaddr *addr = p;
11307         struct bnxt *bp = netdev_priv(dev);
11308         int rc = 0;
11309
11310         if (!is_valid_ether_addr(addr->sa_data))
11311                 return -EADDRNOTAVAIL;
11312
11313         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
11314                 return 0;
11315
11316         rc = bnxt_approve_mac(bp, addr->sa_data, true);
11317         if (rc)
11318                 return rc;
11319
11320         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11321         if (netif_running(dev)) {
11322                 bnxt_close_nic(bp, false, false);
11323                 rc = bnxt_open_nic(bp, false, false);
11324         }
11325
11326         return rc;
11327 }
11328
11329 /* rtnl_lock held */
11330 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
11331 {
11332         struct bnxt *bp = netdev_priv(dev);
11333
11334         if (netif_running(dev))
11335                 bnxt_close_nic(bp, true, false);
11336
11337         dev->mtu = new_mtu;
11338         bnxt_set_ring_params(bp);
11339
11340         if (netif_running(dev))
11341                 return bnxt_open_nic(bp, true, false);
11342
11343         return 0;
11344 }
11345
11346 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
11347 {
11348         struct bnxt *bp = netdev_priv(dev);
11349         bool sh = false;
11350         int rc;
11351
11352         if (tc > bp->max_tc) {
11353                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
11354                            tc, bp->max_tc);
11355                 return -EINVAL;
11356         }
11357
11358         if (netdev_get_num_tc(dev) == tc)
11359                 return 0;
11360
11361         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11362                 sh = true;
11363
11364         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11365                               sh, tc, bp->tx_nr_rings_xdp);
11366         if (rc)
11367                 return rc;
11368
11369         /* Need to close the device and redo hw resource allocation */
11370         if (netif_running(bp->dev))
11371                 bnxt_close_nic(bp, true, false);
11372
11373         if (tc) {
11374                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11375                 netdev_set_num_tc(dev, tc);
11376         } else {
11377                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11378                 netdev_reset_tc(dev);
11379         }
11380         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11381         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11382                                bp->tx_nr_rings + bp->rx_nr_rings;
11383
11384         if (netif_running(bp->dev))
11385                 return bnxt_open_nic(bp, true, false);
11386
11387         return 0;
11388 }
11389
11390 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11391                                   void *cb_priv)
11392 {
11393         struct bnxt *bp = cb_priv;
11394
11395         if (!bnxt_tc_flower_enabled(bp) ||
11396             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11397                 return -EOPNOTSUPP;
11398
11399         switch (type) {
11400         case TC_SETUP_CLSFLOWER:
11401                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11402         default:
11403                 return -EOPNOTSUPP;
11404         }
11405 }
11406
11407 LIST_HEAD(bnxt_block_cb_list);
11408
11409 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11410                          void *type_data)
11411 {
11412         struct bnxt *bp = netdev_priv(dev);
11413
11414         switch (type) {
11415         case TC_SETUP_BLOCK:
11416                 return flow_block_cb_setup_simple(type_data,
11417                                                   &bnxt_block_cb_list,
11418                                                   bnxt_setup_tc_block_cb,
11419                                                   bp, bp, true);
11420         case TC_SETUP_QDISC_MQPRIO: {
11421                 struct tc_mqprio_qopt *mqprio = type_data;
11422
11423                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11424
11425                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11426         }
11427         default:
11428                 return -EOPNOTSUPP;
11429         }
11430 }
11431
11432 #ifdef CONFIG_RFS_ACCEL
11433 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11434                             struct bnxt_ntuple_filter *f2)
11435 {
11436         struct flow_keys *keys1 = &f1->fkeys;
11437         struct flow_keys *keys2 = &f2->fkeys;
11438
11439         if (keys1->basic.n_proto != keys2->basic.n_proto ||
11440             keys1->basic.ip_proto != keys2->basic.ip_proto)
11441                 return false;
11442
11443         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
11444                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11445                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11446                         return false;
11447         } else {
11448                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11449                            sizeof(keys1->addrs.v6addrs.src)) ||
11450                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11451                            sizeof(keys1->addrs.v6addrs.dst)))
11452                         return false;
11453         }
11454
11455         if (keys1->ports.ports == keys2->ports.ports &&
11456             keys1->control.flags == keys2->control.flags &&
11457             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11458             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11459                 return true;
11460
11461         return false;
11462 }
11463
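      /* aRFS callback (ndo_rx_flow_steer): dissect the flow, reject flow
       * types the hardware cannot steer, drop duplicates already present in
       * the ntuple hash table, then allocate a software filter ID and hand
       * the actual HWRM filter programming off to the sp_task workqueue.
       */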
11464 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11465                               u16 rxq_index, u32 flow_id)
11466 {
11467         struct bnxt *bp = netdev_priv(dev);
11468         struct bnxt_ntuple_filter *fltr, *new_fltr;
11469         struct flow_keys *fkeys;
11470         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11471         int rc = 0, idx, bit_id, l2_idx = 0;
11472         struct hlist_head *head;
11473         u32 flags;
11474
11475         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11476                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11477                 int off = 0, j;
11478
11479                 netif_addr_lock_bh(dev);
11480                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11481                         if (ether_addr_equal(eth->h_dest,
11482                                              vnic->uc_list + off)) {
11483                                 l2_idx = j + 1;
11484                                 break;
11485                         }
11486                 }
11487                 netif_addr_unlock_bh(dev);
11488                 if (!l2_idx)
11489                         return -EINVAL;
11490         }
11491         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11492         if (!new_fltr)
11493                 return -ENOMEM;
11494
11495         fkeys = &new_fltr->fkeys;
11496         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11497                 rc = -EPROTONOSUPPORT;
11498                 goto err_free;
11499         }
11500
11501         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11502              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11503             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11504              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11505                 rc = -EPROTONOSUPPORT;
11506                 goto err_free;
11507         }
11508         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11509             bp->hwrm_spec_code < 0x10601) {
11510                 rc = -EPROTONOSUPPORT;
11511                 goto err_free;
11512         }
11513         flags = fkeys->control.flags;
11514         if (((flags & FLOW_DIS_ENCAPSULATION) &&
11515              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
11516                 rc = -EPROTONOSUPPORT;
11517                 goto err_free;
11518         }
11519
11520         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11521         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11522
11523         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11524         head = &bp->ntp_fltr_hash_tbl[idx];
11525         rcu_read_lock();
11526         hlist_for_each_entry_rcu(fltr, head, hash) {
11527                 if (bnxt_fltr_match(fltr, new_fltr)) {
11528                         rcu_read_unlock();
11529                         rc = 0;
11530                         goto err_free;
11531                 }
11532         }
11533         rcu_read_unlock();
11534
11535         spin_lock_bh(&bp->ntp_fltr_lock);
11536         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11537                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
11538         if (bit_id < 0) {
11539                 spin_unlock_bh(&bp->ntp_fltr_lock);
11540                 rc = -ENOMEM;
11541                 goto err_free;
11542         }
11543
11544         new_fltr->sw_id = (u16)bit_id;
11545         new_fltr->flow_id = flow_id;
11546         new_fltr->l2_fltr_idx = l2_idx;
11547         new_fltr->rxq = rxq_index;
11548         hlist_add_head_rcu(&new_fltr->hash, head);
11549         bp->ntp_fltr_count++;
11550         spin_unlock_bh(&bp->ntp_fltr_lock);
11551
11552         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11553         bnxt_queue_sp_work(bp);
11554
11555         return new_fltr->sw_id;
11556
11557 err_free:
11558         kfree(new_fltr);
11559         return rc;
11560 }
11561
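      /* Walk the ntuple filter table from sp_task context: program filters
       * that are not yet valid in hardware, and free the ones that RPS
       * reports as expired.
       */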
11562 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11563 {
11564         int i;
11565
11566         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11567                 struct hlist_head *head;
11568                 struct hlist_node *tmp;
11569                 struct bnxt_ntuple_filter *fltr;
11570                 int rc;
11571
11572                 head = &bp->ntp_fltr_hash_tbl[i];
11573                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11574                         bool del = false;
11575
11576                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11577                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11578                                                         fltr->flow_id,
11579                                                         fltr->sw_id)) {
11580                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
11581                                                                          fltr);
11582                                         del = true;
11583                                 }
11584                         } else {
11585                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11586                                                                        fltr);
11587                                 if (rc)
11588                                         del = true;
11589                                 else
11590                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
11591                         }
11592
11593                         if (del) {
11594                                 spin_lock_bh(&bp->ntp_fltr_lock);
11595                                 hlist_del_rcu(&fltr->hash);
11596                                 bp->ntp_fltr_count--;
11597                                 spin_unlock_bh(&bp->ntp_fltr_lock);
11598                                 synchronize_rcu();
11599                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11600                                 kfree(fltr);
11601                         }
11602                 }
11603         }
11604         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11605                 netdev_info(bp->dev, "Received PF driver unload event!\n");
11606 }
11607
11608 #else
11609
11610 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11611 {
11612 }
11613
11614 #endif /* CONFIG_RFS_ACCEL */
11615
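      /* udp_tunnel_nic sync callback.  Table 0 holds the single VXLAN port
       * and table 1 the single GENEVE port (see bnxt_udp_tunnels below); a
       * non-zero port means allocate the firmware dst port, zero means free
       * it.
       */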
11616 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
11617 {
11618         struct bnxt *bp = netdev_priv(netdev);
11619         struct udp_tunnel_info ti;
11620         unsigned int cmd;
11621
11622         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
11623         if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
11624                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
11625         else
11626                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
11627
11628         if (ti.port)
11629                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
11630
11631         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
11632 }
11633
11634 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
11635         .sync_table     = bnxt_udp_tunnel_sync,
11636         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
11637                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
11638         .tables         = {
11639                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
11640                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
11641         },
11642 };
11643
11644 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11645                                struct net_device *dev, u32 filter_mask,
11646                                int nlflags)
11647 {
11648         struct bnxt *bp = netdev_priv(dev);
11649
11650         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11651                                        nlflags, filter_mask, NULL);
11652 }
11653
11654 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
11655                                u16 flags, struct netlink_ext_ack *extack)
11656 {
11657         struct bnxt *bp = netdev_priv(dev);
11658         struct nlattr *attr, *br_spec;
11659         int rem, rc = 0;
11660
11661         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11662                 return -EOPNOTSUPP;
11663
11664         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11665         if (!br_spec)
11666                 return -EINVAL;
11667
11668         nla_for_each_nested(attr, br_spec, rem) {
11669                 u16 mode;
11670
11671                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11672                         continue;
11673
11674                 if (nla_len(attr) < sizeof(mode))
11675                         return -EINVAL;
11676
11677                 mode = nla_get_u16(attr);
11678                 if (mode == bp->br_mode)
11679                         break;
11680
11681                 rc = bnxt_hwrm_set_br_mode(bp, mode);
11682                 if (!rc)
11683                         bp->br_mode = mode;
11684                 break;
11685         }
11686         return rc;
11687 }
11688
11689 int bnxt_get_port_parent_id(struct net_device *dev,
11690                             struct netdev_phys_item_id *ppid)
11691 {
11692         struct bnxt *bp = netdev_priv(dev);
11693
11694         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11695                 return -EOPNOTSUPP;
11696
11697         /* The PF and its VF-reps only support the switchdev framework */
11698         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
11699                 return -EOPNOTSUPP;
11700
11701         ppid->id_len = sizeof(bp->dsn);
11702         memcpy(ppid->id, bp->dsn, ppid->id_len);
11703
11704         return 0;
11705 }
11706
11707 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11708 {
11709         struct bnxt *bp = netdev_priv(dev);
11710
11711         return &bp->dl_port;
11712 }
11713
11714 static const struct net_device_ops bnxt_netdev_ops = {
11715         .ndo_open               = bnxt_open,
11716         .ndo_start_xmit         = bnxt_start_xmit,
11717         .ndo_stop               = bnxt_close,
11718         .ndo_get_stats64        = bnxt_get_stats64,
11719         .ndo_set_rx_mode        = bnxt_set_rx_mode,
11720         .ndo_do_ioctl           = bnxt_ioctl,
11721         .ndo_validate_addr      = eth_validate_addr,
11722         .ndo_set_mac_address    = bnxt_change_mac_addr,
11723         .ndo_change_mtu         = bnxt_change_mtu,
11724         .ndo_fix_features       = bnxt_fix_features,
11725         .ndo_set_features       = bnxt_set_features,
11726         .ndo_tx_timeout         = bnxt_tx_timeout,
11727 #ifdef CONFIG_BNXT_SRIOV
11728         .ndo_get_vf_config      = bnxt_get_vf_config,
11729         .ndo_set_vf_mac         = bnxt_set_vf_mac,
11730         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
11731         .ndo_set_vf_rate        = bnxt_set_vf_bw,
11732         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
11733         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
11734         .ndo_set_vf_trust       = bnxt_set_vf_trust,
11735 #endif
11736         .ndo_setup_tc           = bnxt_setup_tc,
11737 #ifdef CONFIG_RFS_ACCEL
11738         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
11739 #endif
11740         .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
11741         .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
11742         .ndo_bpf                = bnxt_xdp,
11743         .ndo_xdp_xmit           = bnxt_xdp_xmit,
11744         .ndo_bridge_getlink     = bnxt_bridge_getlink,
11745         .ndo_bridge_setlink     = bnxt_bridge_setlink,
11746         .ndo_get_devlink_port   = bnxt_get_devlink_port,
11747 };
11748
11749 static void bnxt_remove_one(struct pci_dev *pdev)
11750 {
11751         struct net_device *dev = pci_get_drvdata(pdev);
11752         struct bnxt *bp = netdev_priv(dev);
11753
11754         if (BNXT_PF(bp))
11755                 bnxt_sriov_disable(bp);
11756
11757         bnxt_dl_fw_reporters_destroy(bp, true);
11758         if (BNXT_PF(bp))
11759                 devlink_port_type_clear(&bp->dl_port);
11760         pci_disable_pcie_error_reporting(pdev);
11761         unregister_netdev(dev);
11762         bnxt_dl_unregister(bp);
11763         bnxt_shutdown_tc(bp);
11764         bnxt_cancel_sp_work(bp);
11765         bp->sp_event = 0;
11766
11767         bnxt_clear_int_mode(bp);
11768         bnxt_hwrm_func_drv_unrgtr(bp);
11769         bnxt_free_hwrm_resources(bp);
11770         bnxt_free_hwrm_short_cmd_req(bp);
11771         bnxt_ethtool_free(bp);
11772         bnxt_dcb_free(bp);
11773         kfree(bp->edev);
11774         bp->edev = NULL;
11775         kfree(bp->fw_health);
11776         bp->fw_health = NULL;
11777         bnxt_cleanup_pci(bp);
11778         bnxt_free_ctx_mem(bp);
11779         kfree(bp->ctx);
11780         bp->ctx = NULL;
11781         kfree(bp->rss_indir_tbl);
11782         bp->rss_indir_tbl = NULL;
11783         bnxt_free_port_stats(bp);
11784         free_netdev(dev);
11785 }
11786
11787 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
11788 {
11789         int rc = 0;
11790         struct bnxt_link_info *link_info = &bp->link_info;
11791
11792         rc = bnxt_hwrm_phy_qcaps(bp);
11793         if (rc) {
11794                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
11795                            rc);
11796                 return rc;
11797         }
11798         if (!fw_dflt)
11799                 return 0;
11800
11801         rc = bnxt_update_link(bp, false);
11802         if (rc) {
11803                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
11804                            rc);
11805                 return rc;
11806         }
11807
11808         /* Older firmware does not have supported_auto_speeds, so assume
11809          * that all supported speeds can be autonegotiated.
11810          */
11811         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11812                 link_info->support_auto_speeds = link_info->support_speeds;
11813
11814         bnxt_init_ethtool_link_settings(bp);
11815         return 0;
11816 }
11817
11818 static int bnxt_get_max_irq(struct pci_dev *pdev)
11819 {
11820         u16 ctrl;
11821
11822         if (!pdev->msix_cap)
11823                 return 1;
11824
11825         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11826         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11827 }
11828
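      /* Compute the maximum usable RX, TX and completion rings from the
       * resources reported by firmware, capped by the IRQs and stat contexts
       * left over after the ULP (RoCE) reservations.
       */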
11829 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11830                                 int *max_cp)
11831 {
11832         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11833         int max_ring_grps = 0, max_irq;
11834
11835         *max_tx = hw_resc->max_tx_rings;
11836         *max_rx = hw_resc->max_rx_rings;
11837         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11838         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11839                         bnxt_get_ulp_msix_num(bp),
11840                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
11841         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11842                 *max_cp = min_t(int, *max_cp, max_irq);
11843         max_ring_grps = hw_resc->max_hw_ring_grps;
11844         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11845                 *max_cp -= 1;
11846                 *max_rx -= 2;
11847         }
11848         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11849                 *max_rx >>= 1;
11850         if (bp->flags & BNXT_FLAG_CHIP_P5) {
11851                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11852                 /* On P5 chips, the max_cp output parameter is the number of available NQs */
11853                 *max_cp = max_irq;
11854         }
11855         *max_rx = min_t(int, *max_rx, max_ring_grps);
11856 }
11857
11858 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11859 {
11860         int rx, tx, cp;
11861
11862         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
11863         *max_rx = rx;
11864         *max_tx = tx;
11865         if (!rx || !tx || !cp)
11866                 return -ENOMEM;
11867
11868         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11869 }
11870
11871 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11872                                bool shared)
11873 {
11874         int rc;
11875
11876         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11877         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11878                 /* Not enough rings, try disabling agg rings. */
11879                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11880                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11881                 if (rc) {
11882                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
11883                         bp->flags |= BNXT_FLAG_AGG_RINGS;
11884                         return rc;
11885                 }
11886                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
11887                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11888                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11889                 bnxt_set_ring_params(bp);
11890         }
11891
11892         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11893                 int max_cp, max_stat, max_irq;
11894
11895                 /* Reserve minimum resources for RoCE */
11896                 max_cp = bnxt_get_max_func_cp_rings(bp);
11897                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11898                 max_irq = bnxt_get_max_func_irqs(bp);
11899                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11900                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11901                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11902                         return 0;
11903
11904                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11905                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11906                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11907                 max_cp = min_t(int, max_cp, max_irq);
11908                 max_cp = min_t(int, max_cp, max_stat);
11909                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11910                 if (rc)
11911                         rc = 0;
11912         }
11913         return rc;
11914 }
11915
11916 /* In initial default shared ring setting, each shared ring must have a
11917  * RX/TX ring pair.
11918  */
11919 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11920 {
11921         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11922         bp->rx_nr_rings = bp->cp_nr_rings;
11923         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11924         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11925 }
11926
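      /* Choose the default ring counts: start from the RSS default (or 1 in
       * a kdump kernel), scale down on multi-port cards, clamp to what the
       * hardware reports, then reserve the rings with firmware, trimming
       * again if the reservation comes back smaller than requested.
       */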
11927 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
11928 {
11929         int dflt_rings, max_rx_rings, max_tx_rings, rc;
11930
11931         if (!bnxt_can_reserve_rings(bp))
11932                 return 0;
11933
11934         if (sh)
11935                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
11936         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
11937         /* Reduce default rings on multi-port cards so that total default
11938          * rings do not exceed CPU count.
11939          */
11940         if (bp->port_count > 1) {
11941                 int max_rings =
11942                         max_t(int, num_online_cpus() / bp->port_count, 1);
11943
11944                 dflt_rings = min_t(int, dflt_rings, max_rings);
11945         }
11946         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
11947         if (rc)
11948                 return rc;
11949         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11950         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
11951         if (sh)
11952                 bnxt_trim_dflt_sh_rings(bp);
11953         else
11954                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11955         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11956
11957         rc = __bnxt_reserve_rings(bp);
11958         if (rc)
11959                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
11960         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11961         if (sh)
11962                 bnxt_trim_dflt_sh_rings(bp);
11963
11964         /* Rings may have been trimmed, re-reserve the trimmed rings. */
11965         if (bnxt_need_reserve_rings(bp)) {
11966                 rc = __bnxt_reserve_rings(bp);
11967                 if (rc)
11968                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11969                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11970         }
11971         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11972                 bp->rx_nr_rings++;
11973                 bp->cp_nr_rings++;
11974         }
11975         if (rc) {
11976                 bp->tx_nr_rings = 0;
11977                 bp->rx_nr_rings = 0;
11978         }
11979         return rc;
11980 }
11981
11982 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11983 {
11984         int rc;
11985
11986         if (bp->tx_nr_rings)
11987                 return 0;
11988
11989         bnxt_ulp_irq_stop(bp);
11990         bnxt_clear_int_mode(bp);
11991         rc = bnxt_set_dflt_rings(bp, true);
11992         if (rc) {
11993                 netdev_err(bp->dev, "Not enough rings available.\n");
11994                 goto init_dflt_ring_err;
11995         }
11996         rc = bnxt_init_int_mode(bp);
11997         if (rc)
11998                 goto init_dflt_ring_err;
11999
12000         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12001         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12002                 bp->flags |= BNXT_FLAG_RFS;
12003                 bp->dev->features |= NETIF_F_NTUPLE;
12004         }
12005 init_dflt_ring_err:
12006         bnxt_ulp_irq_restart(bp, rc);
12007         return rc;
12008 }
12009
12010 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12011 {
12012         int rc;
12013
12014         ASSERT_RTNL();
12015         bnxt_hwrm_func_qcaps(bp);
12016
12017         if (netif_running(bp->dev))
12018                 __bnxt_close_nic(bp, true, false);
12019
12020         bnxt_ulp_irq_stop(bp);
12021         bnxt_clear_int_mode(bp);
12022         rc = bnxt_init_int_mode(bp);
12023         bnxt_ulp_irq_restart(bp, rc);
12024
12025         if (netif_running(bp->dev)) {
12026                 if (rc)
12027                         dev_close(bp->dev);
12028                 else
12029                         rc = bnxt_open_nic(bp, true, false);
12030         }
12031
12032         return rc;
12033 }
12034
12035 static int bnxt_init_mac_addr(struct bnxt *bp)
12036 {
12037         int rc = 0;
12038
12039         if (BNXT_PF(bp)) {
12040                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12041         } else {
12042 #ifdef CONFIG_BNXT_SRIOV
12043                 struct bnxt_vf_info *vf = &bp->vf;
12044                 bool strict_approval = true;
12045
12046                 if (is_valid_ether_addr(vf->mac_addr)) {
12047                         /* overwrite netdev dev_addr with admin VF MAC */
12048                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12049                         /* Older PF driver or firmware may not approve this
12050                          * correctly.
12051                          */
12052                         strict_approval = false;
12053                 } else {
12054                         eth_hw_addr_random(bp->dev);
12055                 }
12056                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12057 #endif
12058         }
12059         return rc;
12060 }
12061
12062 #define BNXT_VPD_LEN    512
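      /* Read the PCI VPD and cache the board part number and serial number
       * from the read-only section, when present.
       */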
12063 static void bnxt_vpd_read_info(struct bnxt *bp)
12064 {
12065         struct pci_dev *pdev = bp->pdev;
12066         int i, len, pos, ro_size;
12067         ssize_t vpd_size;
12068         u8 *vpd_data;
12069
12070         vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
12071         if (!vpd_data)
12072                 return;
12073
12074         vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
12075         if (vpd_size <= 0) {
12076                 netdev_err(bp->dev, "Unable to read VPD\n");
12077                 goto exit;
12078         }
12079
12080         i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
12081         if (i < 0) {
12082                 netdev_err(bp->dev, "VPD read-only section not found\n");
12083                 goto exit;
12084         }
12085
12086         ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
12087         i += PCI_VPD_LRDT_TAG_SIZE;
12088         if (i + ro_size > vpd_size)
12089                 goto exit;
12090
12091         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12092                                         PCI_VPD_RO_KEYWORD_PARTNO);
12093         if (pos < 0)
12094                 goto read_sn;
12095
12096         len = pci_vpd_info_field_size(&vpd_data[pos]);
12097         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12098         if (len + pos > vpd_size)
12099                 goto read_sn;
12100
12101         strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
12102
12103 read_sn:
12104         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12105                                         PCI_VPD_RO_KEYWORD_SERIALNO);
12106         if (pos < 0)
12107                 goto exit;
12108
12109         len = pci_vpd_info_field_size(&vpd_data[pos]);
12110         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12111         if (len + pos > vpd_size)
12112                 goto exit;
12113
12114         strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
12115 exit:
12116         kfree(vpd_data);
12117 }
12118
12119 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12120 {
12121         struct pci_dev *pdev = bp->pdev;
12122         u64 qword;
12123
12124         qword = pci_get_dsn(pdev);
12125         if (!qword) {
12126                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12127                 return -EOPNOTSUPP;
12128         }
12129
12130         put_unaligned_le64(qword, dsn);
12131
12132         bp->flags |= BNXT_FLAG_DSN_VALID;
12133         return 0;
12134 }
12135
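      /* Map the doorbell BAR (BAR 2) into bp->bar1.  bp->db_size must
       * already have been determined by the firmware queries in
       * bnxt_fw_init_one_p2().
       */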
12136 static int bnxt_map_db_bar(struct bnxt *bp)
12137 {
12138         if (!bp->db_size)
12139                 return -ENODEV;
12140         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12141         if (!bp->bar1)
12142                 return -ENOMEM;
12143         return 0;
12144 }
12145
12146 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12147 {
12148         struct net_device *dev;
12149         struct bnxt *bp;
12150         int rc, max_irqs;
12151
12152         if (pci_is_bridge(pdev))
12153                 return -ENODEV;
12154
12155         /* Clear any pending DMA transactions from crash kernel
12156          * while loading driver in capture kernel.
12157          */
12158         if (is_kdump_kernel()) {
12159                 pci_clear_master(pdev);
12160                 pcie_flr(pdev);
12161         }
12162
12163         max_irqs = bnxt_get_max_irq(pdev);
12164         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12165         if (!dev)
12166                 return -ENOMEM;
12167
12168         bp = netdev_priv(dev);
12169         bnxt_set_max_func_irqs(bp, max_irqs);
12170
12171         if (bnxt_vf_pciid(ent->driver_data))
12172                 bp->flags |= BNXT_FLAG_VF;
12173
12174         if (pdev->msix_cap)
12175                 bp->flags |= BNXT_FLAG_MSIX_CAP;
12176
12177         rc = bnxt_init_board(pdev, dev);
12178         if (rc < 0)
12179                 goto init_err_free;
12180
12181         dev->netdev_ops = &bnxt_netdev_ops;
12182         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
12183         dev->ethtool_ops = &bnxt_ethtool_ops;
12184         pci_set_drvdata(pdev, dev);
12185
12186         if (BNXT_PF(bp))
12187                 bnxt_vpd_read_info(bp);
12188
12189         rc = bnxt_alloc_hwrm_resources(bp);
12190         if (rc)
12191                 goto init_err_pci_clean;
12192
12193         mutex_init(&bp->hwrm_cmd_lock);
12194         mutex_init(&bp->link_lock);
12195
12196         rc = bnxt_fw_init_one_p1(bp);
12197         if (rc)
12198                 goto init_err_pci_clean;
12199
12200         if (BNXT_CHIP_P5(bp))
12201                 bp->flags |= BNXT_FLAG_CHIP_P5;
12202
12203         rc = bnxt_fw_init_one_p2(bp);
12204         if (rc)
12205                 goto init_err_pci_clean;
12206
12207         rc = bnxt_map_db_bar(bp);
12208         if (rc) {
12209                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
12210                         rc);
12211                 goto init_err_pci_clean;
12212         }
12213
12214         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12215                            NETIF_F_TSO | NETIF_F_TSO6 |
12216                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12217                            NETIF_F_GSO_IPXIP4 |
12218                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12219                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
12220                            NETIF_F_RXCSUM | NETIF_F_GRO;
12221
12222         if (BNXT_SUPPORTS_TPA(bp))
12223                 dev->hw_features |= NETIF_F_LRO;
12224
12225         dev->hw_enc_features =
12226                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12227                         NETIF_F_TSO | NETIF_F_TSO6 |
12228                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12229                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12230                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
12231         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
12232
12233         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
12234                                     NETIF_F_GSO_GRE_CSUM;
12235         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
12236         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
12237                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12238         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
12239                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
12240         if (BNXT_SUPPORTS_TPA(bp))
12241                 dev->hw_features |= NETIF_F_GRO_HW;
12242         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
12243         if (dev->features & NETIF_F_GRO_HW)
12244                 dev->features &= ~NETIF_F_LRO;
12245         dev->priv_flags |= IFF_UNICAST_FLT;
12246
12247 #ifdef CONFIG_BNXT_SRIOV
12248         init_waitqueue_head(&bp->sriov_cfg_wait);
12249         mutex_init(&bp->sriov_lock);
12250 #endif
12251         if (BNXT_SUPPORTS_TPA(bp)) {
12252                 bp->gro_func = bnxt_gro_func_5730x;
12253                 if (BNXT_CHIP_P4(bp))
12254                         bp->gro_func = bnxt_gro_func_5731x;
12255                 else if (BNXT_CHIP_P5(bp))
12256                         bp->gro_func = bnxt_gro_func_5750x;
12257         }
12258         if (!BNXT_CHIP_P4_PLUS(bp))
12259                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
12260
12261         bp->ulp_probe = bnxt_ulp_probe;
12262
12263         rc = bnxt_init_mac_addr(bp);
12264         if (rc) {
12265                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
12266                 rc = -EADDRNOTAVAIL;
12267                 goto init_err_pci_clean;
12268         }
12269
12270         if (BNXT_PF(bp)) {
12271                 /* Read the adapter's DSN to use as the eswitch switch_id */
12272                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
12273         }
12274
12275         /* MTU range: 60 - FW defined max */
12276         dev->min_mtu = ETH_ZLEN;
12277         dev->max_mtu = bp->max_mtu;
12278
12279         rc = bnxt_probe_phy(bp, true);
12280         if (rc)
12281                 goto init_err_pci_clean;
12282
12283         bnxt_set_rx_skb_mode(bp, false);
12284         bnxt_set_tpa_flags(bp);
12285         bnxt_set_ring_params(bp);
12286         rc = bnxt_set_dflt_rings(bp, true);
12287         if (rc) {
12288                 netdev_err(bp->dev, "Not enough rings available.\n");
12289                 rc = -ENOMEM;
12290                 goto init_err_pci_clean;
12291         }
12292
12293         bnxt_fw_init_one_p3(bp);
12294
12295         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12296                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
12297
12298         rc = bnxt_init_int_mode(bp);
12299         if (rc)
12300                 goto init_err_pci_clean;
12301
12302         /* No TC has been set yet and rings may have been trimmed due to
12303          * limited MSIX, so we re-initialize the TX rings per TC.
12304          */
12305         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12306
12307         rc = bnxt_alloc_rss_indir_tbl(bp);
12308         if (rc)
12309                 goto init_err_pci_clean;
12310         bnxt_set_dflt_rss_indir_tbl(bp);
12311
12312         if (BNXT_PF(bp)) {
12313                 if (!bnxt_pf_wq) {
12314                         bnxt_pf_wq =
12315                                 create_singlethread_workqueue("bnxt_pf_wq");
12316                         if (!bnxt_pf_wq) {
12317                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                      rc = -ENOMEM;
12318                                 goto init_err_pci_clean;
12319                         }
12320                 }
12321                 rc = bnxt_init_tc(bp);
12322                 if (rc)
12323                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
12324                                    rc);
12325         }
12326
12327         bnxt_dl_register(bp);
12328
12329         rc = register_netdev(dev);
12330         if (rc)
12331                 goto init_err_cleanup;
12332
12333         if (BNXT_PF(bp))
12334                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
12335         bnxt_dl_fw_reporters_create(bp);
12336
12337         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
12338                     board_info[ent->driver_data].name,
12339                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
12340         pcie_print_link_status(pdev);
12341
12342         return 0;
12343
12344 init_err_cleanup:
12345         bnxt_dl_unregister(bp);
12346         bnxt_shutdown_tc(bp);
12347         bnxt_clear_int_mode(bp);
12348
12349 init_err_pci_clean:
12350         bnxt_hwrm_func_drv_unrgtr(bp);
12351         bnxt_free_hwrm_short_cmd_req(bp);
12352         bnxt_free_hwrm_resources(bp);
12353         kfree(bp->fw_health);
12354         bp->fw_health = NULL;
12355         bnxt_cleanup_pci(bp);
12356         bnxt_free_ctx_mem(bp);
12357         kfree(bp->ctx);
12358         bp->ctx = NULL;
12359         kfree(bp->rss_indir_tbl);
12360         bp->rss_indir_tbl = NULL;
12361
12362 init_err_free:
12363         free_netdev(dev);
12364         return rc;
12365 }
12366
12367 static void bnxt_shutdown(struct pci_dev *pdev)
12368 {
12369         struct net_device *dev = pci_get_drvdata(pdev);
12370         struct bnxt *bp;
12371
12372         if (!dev)
12373                 return;
12374
12375         rtnl_lock();
12376         bp = netdev_priv(dev);
12377         if (!bp)
12378                 goto shutdown_exit;
12379
12380         if (netif_running(dev))
12381                 dev_close(dev);
12382
12383         bnxt_ulp_shutdown(bp);
12384         bnxt_clear_int_mode(bp);
12385         pci_disable_device(pdev);
12386
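        /* When the system is powering off, arm wake-on-LAN if it is enabled
         * and put the device into D3hot.
         */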
12387         if (system_state == SYSTEM_POWER_OFF) {
12388                 pci_wake_from_d3(pdev, bp->wol);
12389                 pci_set_power_state(pdev, PCI_D3hot);
12390         }
12391
12392 shutdown_exit:
12393         rtnl_unlock();
12394 }
12395
12396 #ifdef CONFIG_PM_SLEEP
12397 static int bnxt_suspend(struct device *device)
12398 {
12399         struct net_device *dev = dev_get_drvdata(device);
12400         struct bnxt *bp = netdev_priv(dev);
12401         int rc = 0;
12402
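        /* Quiesce ULP (e.g. RDMA) users first, then detach and close the
         * netdev if it is running.
         */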
12403         rtnl_lock();
12404         bnxt_ulp_stop(bp);
12405         if (netif_running(dev)) {
12406                 netif_device_detach(dev);
12407                 rc = bnxt_close(dev);
12408         }
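        /* Unregister the driver from firmware and free the backing store
         * context memory before the device loses power.
         */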
12409         bnxt_hwrm_func_drv_unrgtr(bp);
12410         pci_disable_device(bp->pdev);
12411         bnxt_free_ctx_mem(bp);
12412         kfree(bp->ctx);
12413         bp->ctx = NULL;
12414         rtnl_unlock();
12415         return rc;
12416 }
12417
12418 static int bnxt_resume(struct device *device)
12419 {
12420         struct net_device *dev = dev_get_drvdata(device);
12421         struct bnxt *bp = netdev_priv(dev);
12422         int rc = 0;
12423
12424         rtnl_lock();
12425         rc = pci_enable_device(bp->pdev);
12426         if (rc) {
12427                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
12428                            rc);
12429                 goto resume_exit;
12430         }
12431         pci_set_master(bp->pdev);
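        /* Re-establish firmware communication and reset the function before
         * restoring configuration and reopening the device.
         */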
12432         if (bnxt_hwrm_ver_get(bp)) {
12433                 rc = -ENODEV;
12434                 goto resume_exit;
12435         }
12436         rc = bnxt_hwrm_func_reset(bp);
12437         if (rc) {
12438                 rc = -EBUSY;
12439                 goto resume_exit;
12440         }
12441
12442         rc = bnxt_hwrm_func_qcaps(bp);
12443         if (rc)
12444                 goto resume_exit;
12445
12446         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12447                 rc = -ENODEV;
12448                 goto resume_exit;
12449         }
12450
12451         bnxt_get_wol_settings(bp);
12452         if (netif_running(dev)) {
12453                 rc = bnxt_open(dev);
12454                 if (!rc)
12455                         netif_device_attach(dev);
12456         }
12457
12458 resume_exit:
12459         bnxt_ulp_start(bp, rc);
12460         if (!rc)
12461                 bnxt_reenable_sriov(bp);
12462         rtnl_unlock();
12463         return rc;
12464 }
12465
12466 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12467 #define BNXT_PM_OPS (&bnxt_pm_ops)
12468
12469 #else
12470
12471 #define BNXT_PM_OPS NULL
12472
12473 #endif /* CONFIG_PM_SLEEP */
12474
12475 /**
12476  * bnxt_io_error_detected - called when PCI error is detected
12477  * @pdev: Pointer to PCI device
12478  * @state: The current PCI connection state
12479  *
12480  * This function is called after a PCI bus error affecting
12481  * this device has been detected.
12482  */
12483 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
12484                                                pci_channel_state_t state)
12485 {
12486         struct net_device *netdev = pci_get_drvdata(pdev);
12487         struct bnxt *bp = netdev_priv(netdev);
12488
12489         netdev_info(netdev, "PCI I/O error detected\n");
12490
12491         rtnl_lock();
12492         netif_device_detach(netdev);
12493
12494         bnxt_ulp_stop(bp);
12495
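        /* A permanent failure cannot be recovered from; ask the PCI core to
         * disconnect the device.
         */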
12496         if (state == pci_channel_io_perm_failure) {
12497                 rtnl_unlock();
12498                 return PCI_ERS_RESULT_DISCONNECT;
12499         }
12500
12501         if (netif_running(netdev))
12502                 bnxt_close(netdev);
12503
12504         pci_disable_device(pdev);
12505         bnxt_free_ctx_mem(bp);
12506         kfree(bp->ctx);
12507         bp->ctx = NULL;
12508         rtnl_unlock();
12509
12510         /* Request a slot reset. */
12511         return PCI_ERS_RESULT_NEED_RESET;
12512 }
12513
12514 /**
12515  * bnxt_io_slot_reset - called after the PCI bus has been reset.
12516  * @pdev: Pointer to PCI device
12517  *
12518  * Restart the card from scratch, as if from a cold boot.
12519  * At this point, the card has experienced a hard reset,
12520  * followed by fixups by BIOS, and has its config space
12521  * set up identically to what it was at cold boot.
12522  */
12523 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12524 {
12525         struct net_device *netdev = pci_get_drvdata(pdev);
12526         struct bnxt *bp = netdev_priv(netdev);
12527         int err = 0;
12528         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12529
12530         netdev_info(bp->dev, "PCI Slot Reset\n");
12531
12532         rtnl_lock();
12533
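        /* Re-enable the device, reset the function in firmware, and reopen
         * the netdev if it was running; report the slot as recovered only if
         * all of that succeeds.
         */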
12534         if (pci_enable_device(pdev)) {
12535                 dev_err(&pdev->dev,
12536                         "Cannot re-enable PCI device after reset.\n");
12537         } else {
12538                 pci_set_master(pdev);
12539
12540                 err = bnxt_hwrm_func_reset(bp);
12541                 if (!err) {
12542                         err = bnxt_hwrm_func_qcaps(bp);
12543                         if (!err && netif_running(netdev))
12544                                 err = bnxt_open(netdev);
12545                 }
12546                 bnxt_ulp_start(bp, err);
12547                 if (!err) {
12548                         bnxt_reenable_sriov(bp);
12549                         result = PCI_ERS_RESULT_RECOVERED;
12550                 }
12551         }
12552
12553         if (result != PCI_ERS_RESULT_RECOVERED) {
12554                 if (netif_running(netdev))
12555                         dev_close(netdev);
12556                 pci_disable_device(pdev);
12557         }
12558
12559         rtnl_unlock();
12560
12561         return result;
12562 }
12563
12564 /**
12565  * bnxt_io_resume - called when traffic can start flowing again.
12566  * @pdev: Pointer to PCI device
12567  *
12568  * This callback is called when the error recovery driver tells
12569  * us that it's OK to resume normal operation.
12570  */
12571 static void bnxt_io_resume(struct pci_dev *pdev)
12572 {
12573         struct net_device *netdev = pci_get_drvdata(pdev);
12574
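        /* The device was already reopened in bnxt_io_slot_reset(); reattach
         * the netdev so the stack can resume using it.
         */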
12575         rtnl_lock();
12576
12577         netif_device_attach(netdev);
12578
12579         rtnl_unlock();
12580 }
12581
12582 static const struct pci_error_handlers bnxt_err_handler = {
12583         .error_detected = bnxt_io_error_detected,
12584         .slot_reset     = bnxt_io_slot_reset,
12585         .resume         = bnxt_io_resume
12586 };
12587
12588 static struct pci_driver bnxt_pci_driver = {
12589         .name           = DRV_MODULE_NAME,
12590         .id_table       = bnxt_pci_tbl,
12591         .probe          = bnxt_init_one,
12592         .remove         = bnxt_remove_one,
12593         .shutdown       = bnxt_shutdown,
12594         .driver.pm      = BNXT_PM_OPS,
12595         .err_handler    = &bnxt_err_handler,
12596 #if defined(CONFIG_BNXT_SRIOV)
12597         .sriov_configure = bnxt_sriov_configure,
12598 #endif
12599 };
12600
12601 static int __init bnxt_init(void)
12602 {
12603         bnxt_debug_init();
12604         return pci_register_driver(&bnxt_pci_driver);
12605 }
12606
12607 static void __exit bnxt_exit(void)
12608 {
12609         pci_unregister_driver(&bnxt_pci_driver);
12610         if (bnxt_pf_wq)
12611                 destroy_workqueue(bnxt_pf_wq);
12612         bnxt_debug_exit();
12613 }
12614
12615 module_init(bnxt_init);
12616 module_exit(bnxt_exit);