drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_ulp.h"
62 #include "bnxt_sriov.h"
63 #include "bnxt_ethtool.h"
64 #include "bnxt_dcb.h"
65 #include "bnxt_xdp.h"
66 #include "bnxt_vfr.h"
67 #include "bnxt_tc.h"
68 #include "bnxt_devlink.h"
69 #include "bnxt_debugfs.h"
70
71 #define BNXT_TX_TIMEOUT         (5 * HZ)
72
73 MODULE_LICENSE("GPL");
74 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
75
76 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
77 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
78 #define BNXT_RX_COPY_THRESH 256
79
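/* Maximum packet size eligible for the inline "TX push" path in
 * bnxt_start_xmit(), where the BDs and payload are copied directly
 * through the doorbell instead of being queued for normal DMA.
 */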
80 #define BNXT_TX_PUSH_THRESH 164
81
82 enum board_idx {
83         BCM57301,
84         BCM57302,
85         BCM57304,
86         BCM57417_NPAR,
87         BCM58700,
88         BCM57311,
89         BCM57312,
90         BCM57402,
91         BCM57404,
92         BCM57406,
93         BCM57402_NPAR,
94         BCM57407,
95         BCM57412,
96         BCM57414,
97         BCM57416,
98         BCM57417,
99         BCM57412_NPAR,
100         BCM57314,
101         BCM57417_SFP,
102         BCM57416_SFP,
103         BCM57404_NPAR,
104         BCM57406_NPAR,
105         BCM57407_SFP,
106         BCM57407_NPAR,
107         BCM57414_NPAR,
108         BCM57416_NPAR,
109         BCM57452,
110         BCM57454,
111         BCM5745x_NPAR,
112         BCM57508,
113         BCM57504,
114         BCM57502,
115         BCM57508_NPAR,
116         BCM57504_NPAR,
117         BCM57502_NPAR,
118         BCM58802,
119         BCM58804,
120         BCM58808,
121         NETXTREME_E_VF,
122         NETXTREME_C_VF,
123         NETXTREME_S_VF,
124         NETXTREME_E_P5_VF,
125 };
126
127 /* indexed by enum above */
128 static const struct {
129         char *name;
130 } board_info[] = {
131         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
132         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
133         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
134         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
135         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
136         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
137         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
138         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
139         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
140         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
141         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
142         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
143         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
144         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
145         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
146         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
147         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
148         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
149         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
150         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
151         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
152         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
153         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
154         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
155         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
156         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
157         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
158         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
159         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
160         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
161         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
162         [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
163         [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
164         [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
165         [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
166         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
167         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
168         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
169         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
170         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
171         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
172         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
173 };
174
175 static const struct pci_device_id bnxt_pci_tbl[] = {
176         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
177         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
178         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
179         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
180         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
181         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
182         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
183         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
184         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
185         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
186         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
187         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
188         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
189         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
190         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
191         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
192         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
193         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
194         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
195         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
196         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
197         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
198         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
199         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
200         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
201         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
202         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
203         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
204         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
205         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
206         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
207         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
208         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
209         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
210         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
211         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
212         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
213         { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
214         { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
215         { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
216         { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
217         { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
218         { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
219         { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
220         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
221         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
222 #ifdef CONFIG_BNXT_SRIOV
223         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
224         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
225         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
226         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
227         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
228         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
229         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
230         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
231         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
232         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
233         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
234 #endif
235         { 0 }
236 };
237
238 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
239
240 static const u16 bnxt_vf_req_snif[] = {
241         HWRM_FUNC_CFG,
242         HWRM_FUNC_VF_CFG,
243         HWRM_PORT_PHY_QCFG,
244         HWRM_CFA_L2_FILTER_ALLOC,
245 };
246
247 static const u16 bnxt_async_events_arr[] = {
248         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
249         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
250         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
251         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
252         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
253         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
254         ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
255         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
256         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
257 };
258
259 static struct workqueue_struct *bnxt_pf_wq;
260
261 static bool bnxt_vf_pciid(enum board_idx idx)
262 {
263         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
264                 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
265 }
266
267 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
268 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
269 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
270
271 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
272                 writel(DB_CP_IRQ_DIS_FLAGS, db)
273
274 #define BNXT_DB_CQ(db, idx)                                             \
275         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
276
277 #define BNXT_DB_NQ_P5(db, idx)                                          \
278         writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
279
280 #define BNXT_DB_CQ_ARM(db, idx)                                         \
281         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
282
283 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
284         writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
285
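/* Doorbell helpers: P5 chips (BNXT_FLAG_CHIP_P5) use 64-bit notification
 * queue doorbells written with writeq() and keyed by db_key64; older chips
 * use the legacy 32-bit completion-ring doorbell format.
 */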
286 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
287 {
288         if (bp->flags & BNXT_FLAG_CHIP_P5)
289                 BNXT_DB_NQ_P5(db, idx);
290         else
291                 BNXT_DB_CQ(db, idx);
292 }
293
294 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
295 {
296         if (bp->flags & BNXT_FLAG_CHIP_P5)
297                 BNXT_DB_NQ_ARM_P5(db, idx);
298         else
299                 BNXT_DB_CQ_ARM(db, idx);
300 }
301
302 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
303 {
304         if (bp->flags & BNXT_FLAG_CHIP_P5)
305                 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
306                        db->doorbell);
307         else
308                 BNXT_DB_CQ(db, idx);
309 }
310
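/* TX BD length hint flags, indexed by packet length in 512-byte units
 * (length >> 9 in bnxt_start_xmit()).
 */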
311 const u16 bnxt_lhint_arr[] = {
312         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
313         TX_BD_FLAGS_LHINT_512_TO_1023,
314         TX_BD_FLAGS_LHINT_1024_TO_2047,
315         TX_BD_FLAGS_LHINT_1024_TO_2047,
316         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
317         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
318         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
319         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
320         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
321         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
322         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
323         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
324         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
325         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
326         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
327         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
328         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
329         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
330         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
331 };
332
333 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
334 {
335         struct metadata_dst *md_dst = skb_metadata_dst(skb);
336
337         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
338                 return 0;
339
340         return md_dst->u.port_info.port_id;
341 }
342
343 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
344 {
345         struct bnxt *bp = netdev_priv(dev);
346         struct tx_bd *txbd;
347         struct tx_bd_ext *txbd1;
348         struct netdev_queue *txq;
349         int i;
350         dma_addr_t mapping;
351         unsigned int length, pad = 0;
352         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
353         u16 prod, last_frag;
354         struct pci_dev *pdev = bp->pdev;
355         struct bnxt_tx_ring_info *txr;
356         struct bnxt_sw_tx_bd *tx_buf;
357
358         i = skb_get_queue_mapping(skb);
359         if (unlikely(i >= bp->tx_nr_rings)) {
360                 dev_kfree_skb_any(skb);
361                 return NETDEV_TX_OK;
362         }
363
364         txq = netdev_get_tx_queue(dev, i);
365         txr = &bp->tx_ring[bp->tx_ring_map[i]];
366         prod = txr->tx_prod;
367
368         free_size = bnxt_tx_avail(bp, txr);
369         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
370                 netif_tx_stop_queue(txq);
371                 return NETDEV_TX_BUSY;
372         }
373
374         length = skb->len;
375         len = skb_headlen(skb);
376         last_frag = skb_shinfo(skb)->nr_frags;
377
378         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
379
380         txbd->tx_bd_opaque = prod;
381
382         tx_buf = &txr->tx_buf_ring[prod];
383         tx_buf->skb = skb;
384         tx_buf->nr_frags = last_frag;
385
386         vlan_tag_flags = 0;
387         cfa_action = bnxt_xmit_get_cfa_action(skb);
388         if (skb_vlan_tag_present(skb)) {
389                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
390                                  skb_vlan_tag_get(skb);
391                 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
392                  * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
393                  */
394                 if (skb->vlan_proto == htons(ETH_P_8021Q))
395                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
396         }
397
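        /* Small packets on an otherwise empty ring can use "push" mode:
         * the BDs and payload are copied into the push buffer and written
         * straight through the doorbell below.
         */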
398         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
399                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
400                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
401                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
402                 void __iomem *db = txr->tx_db.doorbell;
403                 void *pdata = tx_push_buf->data;
404                 u64 *end;
405                 int j, push_len;
406
407                 /* Set COAL_NOW to be ready quickly for the next push */
408                 tx_push->tx_bd_len_flags_type =
409                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
410                                         TX_BD_TYPE_LONG_TX_BD |
411                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
412                                         TX_BD_FLAGS_COAL_NOW |
413                                         TX_BD_FLAGS_PACKET_END |
414                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
415
416                 if (skb->ip_summed == CHECKSUM_PARTIAL)
417                         tx_push1->tx_bd_hsize_lflags =
418                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
419                 else
420                         tx_push1->tx_bd_hsize_lflags = 0;
421
422                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
423                 tx_push1->tx_bd_cfa_action =
424                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
425
426                 end = pdata + length;
427                 end = PTR_ALIGN(end, 8) - 1;
428                 *end = 0;
429
430                 skb_copy_from_linear_data(skb, pdata, len);
431                 pdata += len;
432                 for (j = 0; j < last_frag; j++) {
433                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
434                         void *fptr;
435
436                         fptr = skb_frag_address_safe(frag);
437                         if (!fptr)
438                                 goto normal_tx;
439
440                         memcpy(pdata, fptr, skb_frag_size(frag));
441                         pdata += skb_frag_size(frag);
442                 }
443
444                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
445                 txbd->tx_bd_haddr = txr->data_mapping;
446                 prod = NEXT_TX(prod);
447                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
448                 memcpy(txbd, tx_push1, sizeof(*txbd));
449                 prod = NEXT_TX(prod);
450                 tx_push->doorbell =
451                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
452                 txr->tx_prod = prod;
453
454                 tx_buf->is_push = 1;
455                 netdev_tx_sent_queue(txq, skb->len);
456                 wmb();  /* Sync is_push and byte queue before pushing data */
457
458                 push_len = (length + sizeof(*tx_push) + 7) / 8;
459                 if (push_len > 16) {
460                         __iowrite64_copy(db, tx_push_buf, 16);
461                         __iowrite32_copy(db + 4, tx_push_buf + 1,
462                                          (push_len - 16) << 1);
463                 } else {
464                         __iowrite64_copy(db, tx_push_buf, push_len);
465                 }
466
467                 goto tx_done;
468         }
469
470 normal_tx:
471         if (length < BNXT_MIN_PKT_SIZE) {
472                 pad = BNXT_MIN_PKT_SIZE - length;
473                 if (skb_pad(skb, pad)) {
474                         /* SKB already freed. */
475                         tx_buf->skb = NULL;
476                         return NETDEV_TX_OK;
477                 }
478                 length = BNXT_MIN_PKT_SIZE;
479         }
480
481         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
482
483         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
484                 dev_kfree_skb_any(skb);
485                 tx_buf->skb = NULL;
486                 return NETDEV_TX_OK;
487         }
488
489         dma_unmap_addr_set(tx_buf, mapping, mapping);
490         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
491                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
492
493         txbd->tx_bd_haddr = cpu_to_le64(mapping);
494
495         prod = NEXT_TX(prod);
496         txbd1 = (struct tx_bd_ext *)
497                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
498
499         txbd1->tx_bd_hsize_lflags = 0;
500         if (skb_is_gso(skb)) {
501                 u32 hdr_len;
502
503                 if (skb->encapsulation)
504                         hdr_len = skb_inner_network_offset(skb) +
505                                 skb_inner_network_header_len(skb) +
506                                 inner_tcp_hdrlen(skb);
507                 else
508                         hdr_len = skb_transport_offset(skb) +
509                                 tcp_hdrlen(skb);
510
511                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
512                                         TX_BD_FLAGS_T_IPID |
513                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
514                 length = skb_shinfo(skb)->gso_size;
515                 txbd1->tx_bd_mss = cpu_to_le32(length);
516                 length += hdr_len;
517         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
518                 txbd1->tx_bd_hsize_lflags =
519                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
520                 txbd1->tx_bd_mss = 0;
521         }
522
523         length >>= 9;
524         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
525                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
526                                      skb->len);
527                 i = 0;
528                 goto tx_dma_error;
529         }
530         flags |= bnxt_lhint_arr[length];
531         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
532
533         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
534         txbd1->tx_bd_cfa_action =
535                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
536         for (i = 0; i < last_frag; i++) {
537                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
538
539                 prod = NEXT_TX(prod);
540                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
541
542                 len = skb_frag_size(frag);
543                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
544                                            DMA_TO_DEVICE);
545
546                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
547                         goto tx_dma_error;
548
549                 tx_buf = &txr->tx_buf_ring[prod];
550                 dma_unmap_addr_set(tx_buf, mapping, mapping);
551
552                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
553
554                 flags = len << TX_BD_LEN_SHIFT;
555                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
556         }
557
558         flags &= ~TX_BD_LEN;
559         txbd->tx_bd_len_flags_type =
560                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
561                             TX_BD_FLAGS_PACKET_END);
562
563         netdev_tx_sent_queue(txq, skb->len);
564
565         /* Sync BD data before updating doorbell */
566         wmb();
567
568         prod = NEXT_TX(prod);
569         txr->tx_prod = prod;
570
571         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
572                 bnxt_db_write(bp, &txr->tx_db, prod);
573
574 tx_done:
575
576         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
577                 if (netdev_xmit_more() && !tx_buf->is_push)
578                         bnxt_db_write(bp, &txr->tx_db, prod);
579
580                 netif_tx_stop_queue(txq);
581
582                 /* netif_tx_stop_queue() must be done before checking
583                  * tx index in bnxt_tx_avail() below, because in
584                  * bnxt_tx_int(), we update tx index before checking for
585                  * netif_tx_queue_stopped().
586                  */
587                 smp_mb();
588                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
589                         netif_tx_wake_queue(txq);
590         }
591         return NETDEV_TX_OK;
592
593 tx_dma_error:
594         last_frag = i;
595
596         /* start back at beginning and unmap skb */
597         prod = txr->tx_prod;
598         tx_buf = &txr->tx_buf_ring[prod];
599         tx_buf->skb = NULL;
600         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
601                          skb_headlen(skb), PCI_DMA_TODEVICE);
602         prod = NEXT_TX(prod);
603
604         /* unmap remaining mapped pages */
605         for (i = 0; i < last_frag; i++) {
606                 prod = NEXT_TX(prod);
607                 tx_buf = &txr->tx_buf_ring[prod];
608                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
609                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
610                                PCI_DMA_TODEVICE);
611         }
612
613         dev_kfree_skb_any(skb);
614         return NETDEV_TX_OK;
615 }
616
617 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
618 {
619         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
620         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
621         u16 cons = txr->tx_cons;
622         struct pci_dev *pdev = bp->pdev;
623         int i;
624         unsigned int tx_bytes = 0;
625
626         for (i = 0; i < nr_pkts; i++) {
627                 struct bnxt_sw_tx_bd *tx_buf;
628                 struct sk_buff *skb;
629                 int j, last;
630
631                 tx_buf = &txr->tx_buf_ring[cons];
632                 cons = NEXT_TX(cons);
633                 skb = tx_buf->skb;
634                 tx_buf->skb = NULL;
635
636                 if (tx_buf->is_push) {
637                         tx_buf->is_push = 0;
638                         goto next_tx_int;
639                 }
640
641                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
642                                  skb_headlen(skb), PCI_DMA_TODEVICE);
643                 last = tx_buf->nr_frags;
644
645                 for (j = 0; j < last; j++) {
646                         cons = NEXT_TX(cons);
647                         tx_buf = &txr->tx_buf_ring[cons];
648                         dma_unmap_page(
649                                 &pdev->dev,
650                                 dma_unmap_addr(tx_buf, mapping),
651                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
652                                 PCI_DMA_TODEVICE);
653                 }
654
655 next_tx_int:
656                 cons = NEXT_TX(cons);
657
658                 tx_bytes += skb->len;
659                 dev_kfree_skb_any(skb);
660         }
661
662         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
663         txr->tx_cons = cons;
664
665         /* Need to make the tx_cons update visible to bnxt_start_xmit()
666          * before checking for netif_tx_queue_stopped().  Without the
667          * memory barrier, there is a small possibility that bnxt_start_xmit()
668          * will miss it and cause the queue to be stopped forever.
669          */
670         smp_mb();
671
672         if (unlikely(netif_tx_queue_stopped(txq)) &&
673             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
674                 __netif_tx_lock(txq, smp_processor_id());
675                 if (netif_tx_queue_stopped(txq) &&
676                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
677                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
678                         netif_tx_wake_queue(txq);
679                 __netif_tx_unlock(txq);
680         }
681 }
682
683 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
684                                          struct bnxt_rx_ring_info *rxr,
685                                          gfp_t gfp)
686 {
687         struct device *dev = &bp->pdev->dev;
688         struct page *page;
689
690         page = page_pool_dev_alloc_pages(rxr->page_pool);
691         if (!page)
692                 return NULL;
693
694         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
695                                       DMA_ATTR_WEAK_ORDERING);
696         if (dma_mapping_error(dev, *mapping)) {
697                 page_pool_recycle_direct(rxr->page_pool, page);
698                 return NULL;
699         }
700         *mapping += bp->rx_dma_offset;
701         return page;
702 }
703
704 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
705                                        gfp_t gfp)
706 {
707         u8 *data;
708         struct pci_dev *pdev = bp->pdev;
709
710         data = kmalloc(bp->rx_buf_size, gfp);
711         if (!data)
712                 return NULL;
713
714         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
715                                         bp->rx_buf_use_size, bp->rx_dir,
716                                         DMA_ATTR_WEAK_ORDERING);
717
718         if (dma_mapping_error(&pdev->dev, *mapping)) {
719                 kfree(data);
720                 data = NULL;
721         }
722         return data;
723 }
724
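/* Allocate and DMA-map a fresh receive buffer for RX ring slot @prod.
 * Page-mode rings take pages from the page_pool; otherwise a kmalloc'd
 * data buffer is used.
 */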
725 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
726                        u16 prod, gfp_t gfp)
727 {
728         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
729         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
730         dma_addr_t mapping;
731
732         if (BNXT_RX_PAGE_MODE(bp)) {
733                 struct page *page =
734                         __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
735
736                 if (!page)
737                         return -ENOMEM;
738
739                 rx_buf->data = page;
740                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
741         } else {
742                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
743
744                 if (!data)
745                         return -ENOMEM;
746
747                 rx_buf->data = data;
748                 rx_buf->data_ptr = data + bp->rx_offset;
749         }
750         rx_buf->mapping = mapping;
751
752         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
753         return 0;
754 }
755
756 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
757 {
758         u16 prod = rxr->rx_prod;
759         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
760         struct rx_bd *cons_bd, *prod_bd;
761
762         prod_rx_buf = &rxr->rx_buf_ring[prod];
763         cons_rx_buf = &rxr->rx_buf_ring[cons];
764
765         prod_rx_buf->data = data;
766         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
767
768         prod_rx_buf->mapping = cons_rx_buf->mapping;
769
770         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
771         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
772
773         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
774 }
775
776 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
777 {
778         u16 next, max = rxr->rx_agg_bmap_size;
779
780         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
781         if (next >= max)
782                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
783         return next;
784 }
785
786 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
787                                      struct bnxt_rx_ring_info *rxr,
788                                      u16 prod, gfp_t gfp)
789 {
790         struct rx_bd *rxbd =
791                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
792         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
793         struct pci_dev *pdev = bp->pdev;
794         struct page *page;
795         dma_addr_t mapping;
796         u16 sw_prod = rxr->rx_sw_agg_prod;
797         unsigned int offset = 0;
798
799         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
800                 page = rxr->rx_page;
801                 if (!page) {
802                         page = alloc_page(gfp);
803                         if (!page)
804                                 return -ENOMEM;
805                         rxr->rx_page = page;
806                         rxr->rx_page_offset = 0;
807                 }
808                 offset = rxr->rx_page_offset;
809                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
810                 if (rxr->rx_page_offset == PAGE_SIZE)
811                         rxr->rx_page = NULL;
812                 else
813                         get_page(page);
814         } else {
815                 page = alloc_page(gfp);
816                 if (!page)
817                         return -ENOMEM;
818         }
819
820         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
821                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
822                                      DMA_ATTR_WEAK_ORDERING);
823         if (dma_mapping_error(&pdev->dev, mapping)) {
824                 __free_page(page);
825                 return -EIO;
826         }
827
828         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
829                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
830
831         __set_bit(sw_prod, rxr->rx_agg_bmap);
832         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
833         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
834
835         rx_agg_buf->page = page;
836         rx_agg_buf->offset = offset;
837         rx_agg_buf->mapping = mapping;
838         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
839         rxbd->rx_bd_opaque = sw_prod;
840         return 0;
841 }
842
843 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
844                                        struct bnxt_cp_ring_info *cpr,
845                                        u16 cp_cons, u16 curr)
846 {
847         struct rx_agg_cmp *agg;
848
849         cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
850         agg = (struct rx_agg_cmp *)
851                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
852         return agg;
853 }
854
855 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
856                                               struct bnxt_rx_ring_info *rxr,
857                                               u16 agg_id, u16 curr)
858 {
859         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
860
861         return &tpa_info->agg_arr[curr];
862 }
863
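/* Recycle @agg_bufs aggregation buffers (e.g. after an allocation error)
 * by moving them from their completion entries back onto the aggregation
 * ring at the current producer index.
 */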
864 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
865                                    u16 start, u32 agg_bufs, bool tpa)
866 {
867         struct bnxt_napi *bnapi = cpr->bnapi;
868         struct bnxt *bp = bnapi->bp;
869         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
870         u16 prod = rxr->rx_agg_prod;
871         u16 sw_prod = rxr->rx_sw_agg_prod;
872         bool p5_tpa = false;
873         u32 i;
874
875         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
876                 p5_tpa = true;
877
878         for (i = 0; i < agg_bufs; i++) {
879                 u16 cons;
880                 struct rx_agg_cmp *agg;
881                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
882                 struct rx_bd *prod_bd;
883                 struct page *page;
884
885                 if (p5_tpa)
886                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
887                 else
888                         agg = bnxt_get_agg(bp, cpr, idx, start + i);
889                 cons = agg->rx_agg_cmp_opaque;
890                 __clear_bit(cons, rxr->rx_agg_bmap);
891
892                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
893                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
894
895                 __set_bit(sw_prod, rxr->rx_agg_bmap);
896                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
897                 cons_rx_buf = &rxr->rx_agg_ring[cons];
898
899                 /* It is possible for sw_prod to be equal to cons, so
900                  * set cons_rx_buf->page to NULL first.
901                  */
902                 page = cons_rx_buf->page;
903                 cons_rx_buf->page = NULL;
904                 prod_rx_buf->page = page;
905                 prod_rx_buf->offset = cons_rx_buf->offset;
906
907                 prod_rx_buf->mapping = cons_rx_buf->mapping;
908
909                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
910
911                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
912                 prod_bd->rx_bd_opaque = sw_prod;
913
914                 prod = NEXT_RX_AGG(prod);
915                 sw_prod = NEXT_RX_AGG(sw_prod);
916         }
917         rxr->rx_agg_prod = prod;
918         rxr->rx_sw_agg_prod = sw_prod;
919 }
920
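/* Build an skb for a page-mode RX buffer: the header portion is copied
 * into the skb linear area and the rest of the page is attached as a
 * fragment.
 */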
921 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
922                                         struct bnxt_rx_ring_info *rxr,
923                                         u16 cons, void *data, u8 *data_ptr,
924                                         dma_addr_t dma_addr,
925                                         unsigned int offset_and_len)
926 {
927         unsigned int payload = offset_and_len >> 16;
928         unsigned int len = offset_and_len & 0xffff;
929         skb_frag_t *frag;
930         struct page *page = data;
931         u16 prod = rxr->rx_prod;
932         struct sk_buff *skb;
933         int off, err;
934
935         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
936         if (unlikely(err)) {
937                 bnxt_reuse_rx_data(rxr, cons, data);
938                 return NULL;
939         }
940         dma_addr -= bp->rx_dma_offset;
941         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
942                              DMA_ATTR_WEAK_ORDERING);
943         page_pool_release_page(rxr->page_pool, page);
944
945         if (unlikely(!payload))
946                 payload = eth_get_headlen(bp->dev, data_ptr, len);
947
948         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
949         if (!skb) {
950                 __free_page(page);
951                 return NULL;
952         }
953
954         off = (void *)data_ptr - page_address(page);
955         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
956         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
957                payload + NET_IP_ALIGN);
958
959         frag = &skb_shinfo(skb)->frags[0];
960         skb_frag_size_sub(frag, payload);
961         skb_frag_off_add(frag, payload);
962         skb->data_len -= payload;
963         skb->tail += payload;
964
965         return skb;
966 }
967
968 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
969                                    struct bnxt_rx_ring_info *rxr, u16 cons,
970                                    void *data, u8 *data_ptr,
971                                    dma_addr_t dma_addr,
972                                    unsigned int offset_and_len)
973 {
974         u16 prod = rxr->rx_prod;
975         struct sk_buff *skb;
976         int err;
977
978         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
979         if (unlikely(err)) {
980                 bnxt_reuse_rx_data(rxr, cons, data);
981                 return NULL;
982         }
983
984         skb = build_skb(data, 0);
985         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
986                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
987         if (!skb) {
988                 kfree(data);
989                 return NULL;
990         }
991
992         skb_reserve(skb, bp->rx_offset);
993         skb_put(skb, offset_and_len & 0xffff);
994         return skb;
995 }
996
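/* Append the aggregation buffers of a jumbo or TPA packet to @skb as
 * page fragments, refilling the aggregation ring as we go.
 */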
997 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
998                                      struct bnxt_cp_ring_info *cpr,
999                                      struct sk_buff *skb, u16 idx,
1000                                      u32 agg_bufs, bool tpa)
1001 {
1002         struct bnxt_napi *bnapi = cpr->bnapi;
1003         struct pci_dev *pdev = bp->pdev;
1004         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1005         u16 prod = rxr->rx_agg_prod;
1006         bool p5_tpa = false;
1007         u32 i;
1008
1009         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1010                 p5_tpa = true;
1011
1012         for (i = 0; i < agg_bufs; i++) {
1013                 u16 cons, frag_len;
1014                 struct rx_agg_cmp *agg;
1015                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1016                 struct page *page;
1017                 dma_addr_t mapping;
1018
1019                 if (p5_tpa)
1020                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1021                 else
1022                         agg = bnxt_get_agg(bp, cpr, idx, i);
1023                 cons = agg->rx_agg_cmp_opaque;
1024                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1025                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1026
1027                 cons_rx_buf = &rxr->rx_agg_ring[cons];
1028                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1029                                    cons_rx_buf->offset, frag_len);
1030                 __clear_bit(cons, rxr->rx_agg_bmap);
1031
1032                 /* It is possible for bnxt_alloc_rx_page() to allocate
1033                  * a sw_prod index that equals the cons index, so we
1034                  * need to clear the cons entry now.
1035                  */
1036                 mapping = cons_rx_buf->mapping;
1037                 page = cons_rx_buf->page;
1038                 cons_rx_buf->page = NULL;
1039
1040                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1041                         struct skb_shared_info *shinfo;
1042                         unsigned int nr_frags;
1043
1044                         shinfo = skb_shinfo(skb);
1045                         nr_frags = --shinfo->nr_frags;
1046                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1047
1048                         dev_kfree_skb(skb);
1049
1050                         cons_rx_buf->page = page;
1051
1052                         /* Update prod since possibly some pages have been
1053                          * allocated already.
1054                          */
1055                         rxr->rx_agg_prod = prod;
1056                         bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1057                         return NULL;
1058                 }
1059
1060                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1061                                      PCI_DMA_FROMDEVICE,
1062                                      DMA_ATTR_WEAK_ORDERING);
1063
1064                 skb->data_len += frag_len;
1065                 skb->len += frag_len;
1066                 skb->truesize += PAGE_SIZE;
1067
1068                 prod = NEXT_RX_AGG(prod);
1069         }
1070         rxr->rx_agg_prod = prod;
1071         return skb;
1072 }
1073
1074 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1075                                u8 agg_bufs, u32 *raw_cons)
1076 {
1077         u16 last;
1078         struct rx_agg_cmp *agg;
1079
1080         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1081         last = RING_CMP(*raw_cons);
1082         agg = (struct rx_agg_cmp *)
1083                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1084         return RX_AGG_CMP_VALID(agg, *raw_cons);
1085 }
1086
1087 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1088                                             unsigned int len,
1089                                             dma_addr_t mapping)
1090 {
1091         struct bnxt *bp = bnapi->bp;
1092         struct pci_dev *pdev = bp->pdev;
1093         struct sk_buff *skb;
1094
1095         skb = napi_alloc_skb(&bnapi->napi, len);
1096         if (!skb)
1097                 return NULL;
1098
1099         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1100                                 bp->rx_dir);
1101
1102         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1103                len + NET_IP_ALIGN);
1104
1105         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1106                                    bp->rx_dir);
1107
1108         skb_put(skb, len);
1109         return skb;
1110 }
1111
1112 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1113                            u32 *raw_cons, void *cmp)
1114 {
1115         struct rx_cmp *rxcmp = cmp;
1116         u32 tmp_raw_cons = *raw_cons;
1117         u8 cmp_type, agg_bufs = 0;
1118
1119         cmp_type = RX_CMP_TYPE(rxcmp);
1120
1121         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1122                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1123                             RX_CMP_AGG_BUFS) >>
1124                            RX_CMP_AGG_BUFS_SHIFT;
1125         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1126                 struct rx_tpa_end_cmp *tpa_end = cmp;
1127
1128                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1129                         return 0;
1130
1131                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1132         }
1133
1134         if (agg_bufs) {
1135                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1136                         return -EBUSY;
1137         }
1138         *raw_cons = tmp_raw_cons;
1139         return 0;
1140 }
1141
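/* PF work items go on the driver's dedicated bnxt_pf_wq workqueue;
 * VFs use the system workqueue.
 */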
1142 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1143 {
1144         if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1145                 return;
1146
1147         if (BNXT_PF(bp))
1148                 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1149         else
1150                 schedule_delayed_work(&bp->fw_reset_task, delay);
1151 }
1152
1153 static void bnxt_queue_sp_work(struct bnxt *bp)
1154 {
1155         if (BNXT_PF(bp))
1156                 queue_work(bnxt_pf_wq, &bp->sp_task);
1157         else
1158                 schedule_work(&bp->sp_task);
1159 }
1160
1161 static void bnxt_cancel_sp_work(struct bnxt *bp)
1162 {
1163         if (BNXT_PF(bp)) {
1164                 flush_workqueue(bnxt_pf_wq);
1165         } else {
1166                 cancel_work_sync(&bp->sp_task);
1167                 cancel_delayed_work_sync(&bp->fw_reset_task);
1168         }
1169 }
1170
1171 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1172 {
1173         if (!rxr->bnapi->in_reset) {
1174                 rxr->bnapi->in_reset = true;
1175                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1176                 bnxt_queue_sp_work(bp);
1177         }
1178         rxr->rx_next_cons = 0xffff;
1179 }
1180
1181 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1182 {
1183         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1184         u16 idx = agg_id & MAX_TPA_P5_MASK;
1185
1186         if (test_bit(idx, map->agg_idx_bmap))
1187                 idx = find_first_zero_bit(map->agg_idx_bmap,
1188                                           BNXT_AGG_IDX_BMAP_SIZE);
1189         __set_bit(idx, map->agg_idx_bmap);
1190         map->agg_id_tbl[agg_id] = idx;
1191         return idx;
1192 }
1193
1194 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1195 {
1196         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1197
1198         __clear_bit(idx, map->agg_idx_bmap);
1199 }
1200
1201 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1202 {
1203         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1204
1205         return map->agg_id_tbl[agg_id];
1206 }
1207
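/* Handle a TPA_START completion: park the current RX buffer in the
 * per-aggregation tpa_info and put tpa_info's previous buffer back on
 * the RX ring in its place.
 */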
1208 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1209                            struct rx_tpa_start_cmp *tpa_start,
1210                            struct rx_tpa_start_cmp_ext *tpa_start1)
1211 {
1212         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1213         struct bnxt_tpa_info *tpa_info;
1214         u16 cons, prod, agg_id;
1215         struct rx_bd *prod_bd;
1216         dma_addr_t mapping;
1217
1218         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1219                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1220                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1221         } else {
1222                 agg_id = TPA_START_AGG_ID(tpa_start);
1223         }
1224         cons = tpa_start->rx_tpa_start_cmp_opaque;
1225         prod = rxr->rx_prod;
1226         cons_rx_buf = &rxr->rx_buf_ring[cons];
1227         prod_rx_buf = &rxr->rx_buf_ring[prod];
1228         tpa_info = &rxr->rx_tpa[agg_id];
1229
1230         if (unlikely(cons != rxr->rx_next_cons ||
1231                      TPA_START_ERROR(tpa_start))) {
1232                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1233                             cons, rxr->rx_next_cons,
1234                             TPA_START_ERROR_CODE(tpa_start1));
1235                 bnxt_sched_reset(bp, rxr);
1236                 return;
1237         }
1238         /* Store cfa_code in tpa_info to use in tpa_end
1239          * completion processing.
1240          */
1241         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1242         prod_rx_buf->data = tpa_info->data;
1243         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1244
1245         mapping = tpa_info->mapping;
1246         prod_rx_buf->mapping = mapping;
1247
1248         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1249
1250         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1251
1252         tpa_info->data = cons_rx_buf->data;
1253         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1254         cons_rx_buf->data = NULL;
1255         tpa_info->mapping = cons_rx_buf->mapping;
1256
1257         tpa_info->len =
1258                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1259                                 RX_TPA_START_CMP_LEN_SHIFT;
1260         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1261                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1262
1263                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1264                 tpa_info->gso_type = SKB_GSO_TCPV4;
1265                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1266                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1267                         tpa_info->gso_type = SKB_GSO_TCPV6;
1268                 tpa_info->rss_hash =
1269                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1270         } else {
1271                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1272                 tpa_info->gso_type = 0;
1273                 if (netif_msg_rx_err(bp))
1274                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1275         }
1276         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1277         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1278         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1279         tpa_info->agg_count = 0;
1280
1281         rxr->rx_prod = NEXT_RX(prod);
1282         cons = NEXT_RX(cons);
1283         rxr->rx_next_cons = NEXT_RX(cons);
1284         cons_rx_buf = &rxr->rx_buf_ring[cons];
1285
1286         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1287         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1288         cons_rx_buf->data = NULL;
1289 }
1290
1291 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1292 {
1293         if (agg_bufs)
1294                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1295 }
1296
1297 #ifdef CONFIG_INET
1298 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1299 {
1300         struct udphdr *uh = NULL;
1301
1302         if (ip_proto == htons(ETH_P_IP)) {
1303                 struct iphdr *iph = (struct iphdr *)skb->data;
1304
1305                 if (iph->protocol == IPPROTO_UDP)
1306                         uh = (struct udphdr *)(iph + 1);
1307         } else {
1308                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1309
1310                 if (iph->nexthdr == IPPROTO_UDP)
1311                         uh = (struct udphdr *)(iph + 1);
1312         }
1313         if (uh) {
1314                 if (uh->check)
1315                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1316                 else
1317                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1318         }
1319 }
1320 #endif
1321
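/* Per-chip-family GRO fixup helpers: using the header offsets reported in
 * the TPA completion, reset the skb's network and transport headers (and,
 * where needed, the TCP pseudo-header checksum) for the aggregated packet.
 */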
1322 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1323                                            int payload_off, int tcp_ts,
1324                                            struct sk_buff *skb)
1325 {
1326 #ifdef CONFIG_INET
1327         struct tcphdr *th;
1328         int len, nw_off;
1329         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1330         u32 hdr_info = tpa_info->hdr_info;
1331         bool loopback = false;
1332
1333         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1334         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1335         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1336
1337         /* If the packet is an internal loopback packet, the offsets will
1338          * have an extra 4 bytes.
1339          */
1340         if (inner_mac_off == 4) {
1341                 loopback = true;
1342         } else if (inner_mac_off > 4) {
1343                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1344                                             ETH_HLEN - 2));
1345
1346                 /* We only support inner IPv4/IPv6.  If we don't see the
1347                  * correct protocol ID, it must be a loopback packet where
1348                  * the offsets are off by 4.
1349                  */
1350                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1351                         loopback = true;
1352         }
1353         if (loopback) {
1354                 /* internal loopback packet, subtract all offsets by 4 */
1355                 inner_ip_off -= 4;
1356                 inner_mac_off -= 4;
1357                 outer_ip_off -= 4;
1358         }
1359
1360         nw_off = inner_ip_off - ETH_HLEN;
1361         skb_set_network_header(skb, nw_off);
1362         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1363                 struct ipv6hdr *iph = ipv6_hdr(skb);
1364
1365                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1366                 len = skb->len - skb_transport_offset(skb);
1367                 th = tcp_hdr(skb);
1368                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1369         } else {
1370                 struct iphdr *iph = ip_hdr(skb);
1371
1372                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1373                 len = skb->len - skb_transport_offset(skb);
1374                 th = tcp_hdr(skb);
1375                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1376         }
1377
1378         if (inner_mac_off) { /* tunnel */
1379                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1380                                             ETH_HLEN - 2));
1381
1382                 bnxt_gro_tunnel(skb, proto);
1383         }
1384 #endif
1385         return skb;
1386 }
1387
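/* GRO fixup used on P5 (5750x-class) chips.  It consumes the same
 * hardware-provided header offsets as the 5731x variant, but it only sets
 * the network/transport header offsets and does not recompute the TCP
 * pseudo-header checksum.  Tunnelled packets are handled the same way via
 * bnxt_gro_tunnel().
 */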
1388 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1389                                            int payload_off, int tcp_ts,
1390                                            struct sk_buff *skb)
1391 {
1392 #ifdef CONFIG_INET
1393         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1394         u32 hdr_info = tpa_info->hdr_info;
1395         int iphdr_len, nw_off;
1396
1397         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1398         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1399         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1400
1401         nw_off = inner_ip_off - ETH_HLEN;
1402         skb_set_network_header(skb, nw_off);
1403         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1404                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1405         skb_set_transport_header(skb, nw_off + iphdr_len);
1406
1407         if (inner_mac_off) { /* tunnel */
1408                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1409                                             ETH_HLEN - 2));
1410
1411                 bnxt_gro_tunnel(skb, proto);
1412         }
1413 #endif
1414         return skb;
1415 }
1416
1417 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1418 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1419
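/* GRO fixup used on the older 5730x-class chips, which do not provide header
 * offsets in the completion.  The inner header positions are derived from
 * the reported TCP payload offset, the fixed IPv4/IPv6 + TCP header sizes,
 * and the 12-byte TCP timestamp option when present.  A non-zero resulting
 * network offset implies an encapsulated packet, handed to bnxt_gro_tunnel().
 * Aggregations that are neither TCPv4 nor TCPv6 are dropped.
 */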
1420 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1421                                            int payload_off, int tcp_ts,
1422                                            struct sk_buff *skb)
1423 {
1424 #ifdef CONFIG_INET
1425         struct tcphdr *th;
1426         int len, nw_off, tcp_opt_len = 0;
1427
1428         if (tcp_ts)
1429                 tcp_opt_len = 12;
1430
1431         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1432                 struct iphdr *iph;
1433
1434                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1435                          ETH_HLEN;
1436                 skb_set_network_header(skb, nw_off);
1437                 iph = ip_hdr(skb);
1438                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1439                 len = skb->len - skb_transport_offset(skb);
1440                 th = tcp_hdr(skb);
1441                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1442         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1443                 struct ipv6hdr *iph;
1444
1445                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1446                          ETH_HLEN;
1447                 skb_set_network_header(skb, nw_off);
1448                 iph = ipv6_hdr(skb);
1449                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1450                 len = skb->len - skb_transport_offset(skb);
1451                 th = tcp_hdr(skb);
1452                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1453         } else {
1454                 dev_kfree_skb_any(skb);
1455                 return NULL;
1456         }
1457
1458         if (nw_off) /* tunnel */
1459                 bnxt_gro_tunnel(skb, skb->protocol);
1460 #endif
1461         return skb;
1462 }
1463
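/* Prepare an aggregated (TPA) skb for GRO: single-segment aggregations are
 * returned as-is; otherwise the GRO segment count, gso_size and gso_type are
 * filled in from the TPA end completion, the chip-specific fixup in
 * bp->gro_func is run, and tcp_gro_complete() finalizes the skb.
 */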
1464 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1465                                            struct bnxt_tpa_info *tpa_info,
1466                                            struct rx_tpa_end_cmp *tpa_end,
1467                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1468                                            struct sk_buff *skb)
1469 {
1470 #ifdef CONFIG_INET
1471         int payload_off;
1472         u16 segs;
1473
1474         segs = TPA_END_TPA_SEGS(tpa_end);
1475         if (segs == 1)
1476                 return skb;
1477
1478         NAPI_GRO_CB(skb)->count = segs;
1479         skb_shinfo(skb)->gso_size =
1480                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1481         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1482         if (bp->flags & BNXT_FLAG_CHIP_P5)
1483                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1484         else
1485                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1486         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1487         if (likely(skb))
1488                 tcp_gro_complete(skb);
1489 #endif
1490         return skb;
1491 }
1492
1493 /* Given the cfa_code of a received packet, determine which
1494  * netdev (vf-rep or PF) the packet is destined to.
1495  */
1496 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1497 {
1498         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1499
1500         /* if vf-rep dev is NULL, the packet must belong to the PF */
1501         return dev ? dev : bp->dev;
1502 }
1503
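/* Handle a TPA end completion and construct the aggregated skb.
 * Returns ERR_PTR(-EBUSY) if the aggregation buffers have not all arrived on
 * the completion ring yet, NULL if the packet had to be dropped (error or
 * allocation failure, with the aggregation buffers recycled), or the skb on
 * success.  Small aggregations (<= rx_copy_thresh) are copied into a fresh
 * skb; larger ones take over the ring buffer via build_skb() after a
 * replacement buffer has been allocated.  Aggregation pages, RSS hash, VLAN
 * tag, checksum state and the optional GRO fixup are applied before the skb
 * is returned.
 */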
1504 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1505                                            struct bnxt_cp_ring_info *cpr,
1506                                            u32 *raw_cons,
1507                                            struct rx_tpa_end_cmp *tpa_end,
1508                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1509                                            u8 *event)
1510 {
1511         struct bnxt_napi *bnapi = cpr->bnapi;
1512         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1513         u8 *data_ptr, agg_bufs;
1514         unsigned int len;
1515         struct bnxt_tpa_info *tpa_info;
1516         dma_addr_t mapping;
1517         struct sk_buff *skb;
1518         u16 idx = 0, agg_id;
1519         void *data;
1520         bool gro;
1521
1522         if (unlikely(bnapi->in_reset)) {
1523                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1524
1525                 if (rc < 0)
1526                         return ERR_PTR(-EBUSY);
1527                 return NULL;
1528         }
1529
1530         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1531                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1532                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1533                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1534                 tpa_info = &rxr->rx_tpa[agg_id];
1535                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1536                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1537                                     agg_bufs, tpa_info->agg_count);
1538                         agg_bufs = tpa_info->agg_count;
1539                 }
1540                 tpa_info->agg_count = 0;
1541                 *event |= BNXT_AGG_EVENT;
1542                 bnxt_free_agg_idx(rxr, agg_id);
1543                 idx = agg_id;
1544                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1545         } else {
1546                 agg_id = TPA_END_AGG_ID(tpa_end);
1547                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1548                 tpa_info = &rxr->rx_tpa[agg_id];
1549                 idx = RING_CMP(*raw_cons);
1550                 if (agg_bufs) {
1551                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1552                                 return ERR_PTR(-EBUSY);
1553
1554                         *event |= BNXT_AGG_EVENT;
1555                         idx = NEXT_CMP(idx);
1556                 }
1557                 gro = !!TPA_END_GRO(tpa_end);
1558         }
1559         data = tpa_info->data;
1560         data_ptr = tpa_info->data_ptr;
1561         prefetch(data_ptr);
1562         len = tpa_info->len;
1563         mapping = tpa_info->mapping;
1564
1565         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1566                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1567                 if (agg_bufs > MAX_SKB_FRAGS)
1568                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1569                                     agg_bufs, (int)MAX_SKB_FRAGS);
1570                 return NULL;
1571         }
1572
1573         if (len <= bp->rx_copy_thresh) {
1574                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1575                 if (!skb) {
1576                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1577                         return NULL;
1578                 }
1579         } else {
1580                 u8 *new_data;
1581                 dma_addr_t new_mapping;
1582
1583                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1584                 if (!new_data) {
1585                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1586                         return NULL;
1587                 }
1588
1589                 tpa_info->data = new_data;
1590                 tpa_info->data_ptr = new_data + bp->rx_offset;
1591                 tpa_info->mapping = new_mapping;
1592
1593                 skb = build_skb(data, 0);
1594                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1595                                        bp->rx_buf_use_size, bp->rx_dir,
1596                                        DMA_ATTR_WEAK_ORDERING);
1597
1598                 if (!skb) {
1599                         kfree(data);
1600                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1601                         return NULL;
1602                 }
1603                 skb_reserve(skb, bp->rx_offset);
1604                 skb_put(skb, len);
1605         }
1606
1607         if (agg_bufs) {
1608                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1609                 if (!skb) {
1610                         /* Page reuse already handled by bnxt_rx_pages(). */
1611                         return NULL;
1612                 }
1613         }
1614
1615         skb->protocol =
1616                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1617
1618         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1619                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1620
1621         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1622             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1623                 u16 vlan_proto = tpa_info->metadata >>
1624                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1625                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1626
1627                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1628         }
1629
1630         skb_checksum_none_assert(skb);
1631         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1632                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1633                 skb->csum_level =
1634                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1635         }
1636
1637         if (gro)
1638                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1639
1640         return skb;
1641 }
1642
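/* On P5 chips the aggregation buffer descriptors for a TPA burst arrive as
 * separate TPA_AGG completions; stash each one in the per-aggregation array
 * so that bnxt_tpa_end() can consume them when the TPA end completion shows
 * up.
 */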
1643 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1644                          struct rx_agg_cmp *rx_agg)
1645 {
1646         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1647         struct bnxt_tpa_info *tpa_info;
1648
1649         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1650         tpa_info = &rxr->rx_tpa[agg_id];
1651         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1652         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1653 }
1654
1655 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1656                              struct sk_buff *skb)
1657 {
1658         if (skb->dev != bp->dev) {
1659                 /* this packet belongs to a vf-rep */
1660                 bnxt_vf_rep_rx(bp, skb);
1661                 return;
1662         }
1663         skb_record_rx_queue(skb, bnapi->index);
1664         napi_gro_receive(&bnapi->napi, skb);
1665 }
1666
1667 /* returns the following:
1668  * 1       - 1 packet successfully received
1669  * 0       - successful TPA_START, packet not completed yet
1670  * -EBUSY  - completion ring does not have all the agg buffers yet
1671  * -ENOMEM - packet aborted due to out of memory
1672  * -EIO    - packet aborted due to hw error indicated in BD
1673  */
1674 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1675                        u32 *raw_cons, u8 *event)
1676 {
1677         struct bnxt_napi *bnapi = cpr->bnapi;
1678         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1679         struct net_device *dev = bp->dev;
1680         struct rx_cmp *rxcmp;
1681         struct rx_cmp_ext *rxcmp1;
1682         u32 tmp_raw_cons = *raw_cons;
1683         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1684         struct bnxt_sw_rx_bd *rx_buf;
1685         unsigned int len;
1686         u8 *data_ptr, agg_bufs, cmp_type;
1687         dma_addr_t dma_addr;
1688         struct sk_buff *skb;
1689         void *data;
1690         int rc = 0;
1691         u32 misc;
1692
1693         rxcmp = (struct rx_cmp *)
1694                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1695
1696         cmp_type = RX_CMP_TYPE(rxcmp);
1697
1698         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1699                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1700                 goto next_rx_no_prod_no_len;
1701         }
1702
1703         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1704         cp_cons = RING_CMP(tmp_raw_cons);
1705         rxcmp1 = (struct rx_cmp_ext *)
1706                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1707
1708         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1709                 return -EBUSY;
1710
1711         prod = rxr->rx_prod;
1712
1713         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1714                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1715                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1716
1717                 *event |= BNXT_RX_EVENT;
1718                 goto next_rx_no_prod_no_len;
1719
1720         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1721                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1722                                    (struct rx_tpa_end_cmp *)rxcmp,
1723                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1724
1725                 if (IS_ERR(skb))
1726                         return -EBUSY;
1727
1728                 rc = -ENOMEM;
1729                 if (likely(skb)) {
1730                         bnxt_deliver_skb(bp, bnapi, skb);
1731                         rc = 1;
1732                 }
1733                 *event |= BNXT_RX_EVENT;
1734                 goto next_rx_no_prod_no_len;
1735         }
1736
1737         cons = rxcmp->rx_cmp_opaque;
1738         if (unlikely(cons != rxr->rx_next_cons)) {
1739                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1740
1741                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1742                             cons, rxr->rx_next_cons);
1743                 bnxt_sched_reset(bp, rxr);
1744                 return rc1;
1745         }
1746         rx_buf = &rxr->rx_buf_ring[cons];
1747         data = rx_buf->data;
1748         data_ptr = rx_buf->data_ptr;
1749         prefetch(data_ptr);
1750
1751         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1752         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1753
1754         if (agg_bufs) {
1755                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1756                         return -EBUSY;
1757
1758                 cp_cons = NEXT_CMP(cp_cons);
1759                 *event |= BNXT_AGG_EVENT;
1760         }
1761         *event |= BNXT_RX_EVENT;
1762
1763         rx_buf->data = NULL;
1764         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1765                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1766
1767                 bnxt_reuse_rx_data(rxr, cons, data);
1768                 if (agg_bufs)
1769                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1770                                                false);
1771
1772                 rc = -EIO;
1773                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1774                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1775                         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1776                                 netdev_warn(bp->dev, "RX buffer error %x\n",
1777                                             rx_err);
1778                                 bnxt_sched_reset(bp, rxr);
1779                         }
1780                 }
1781                 goto next_rx_no_len;
1782         }
1783
1784         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1785         dma_addr = rx_buf->mapping;
1786
1787         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1788                 rc = 1;
1789                 goto next_rx;
1790         }
1791
1792         if (len <= bp->rx_copy_thresh) {
1793                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1794                 bnxt_reuse_rx_data(rxr, cons, data);
1795                 if (!skb) {
1796                         if (agg_bufs)
1797                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1798                                                        agg_bufs, false);
1799                         rc = -ENOMEM;
1800                         goto next_rx;
1801                 }
1802         } else {
1803                 u32 payload;
1804
1805                 if (rx_buf->data_ptr == data_ptr)
1806                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1807                 else
1808                         payload = 0;
1809                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1810                                       payload | len);
1811                 if (!skb) {
1812                         rc = -ENOMEM;
1813                         goto next_rx;
1814                 }
1815         }
1816
1817         if (agg_bufs) {
1818                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1819                 if (!skb) {
1820                         rc = -ENOMEM;
1821                         goto next_rx;
1822                 }
1823         }
1824
1825         if (RX_CMP_HASH_VALID(rxcmp)) {
1826                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1827                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1828
1829                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1830                 if (hash_type != 1 && hash_type != 3)
1831                         type = PKT_HASH_TYPE_L3;
1832                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1833         }
1834
1835         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1836         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1837
1838         if ((rxcmp1->rx_cmp_flags2 &
1839              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1840             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1841                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1842                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1843                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1844
1845                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1846         }
1847
1848         skb_checksum_none_assert(skb);
1849         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1850                 if (dev->features & NETIF_F_RXCSUM) {
1851                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1852                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1853                 }
1854         } else {
1855                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1856                         if (dev->features & NETIF_F_RXCSUM)
1857                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1858                 }
1859         }
1860
1861         bnxt_deliver_skb(bp, bnapi, skb);
1862         rc = 1;
1863
1864 next_rx:
1865         cpr->rx_packets += 1;
1866         cpr->rx_bytes += len;
1867
1868 next_rx_no_len:
1869         rxr->rx_prod = NEXT_RX(prod);
1870         rxr->rx_next_cons = NEXT_RX(cons);
1871
1872 next_rx_no_prod_no_len:
1873         *raw_cons = tmp_raw_cons;
1874
1875         return rc;
1876 }
1877
1878 /* In netpoll mode, if we are using a combined completion ring, we need to
1879  * discard the rx packets and recycle the buffers.
1880  */
1881 static int bnxt_force_rx_discard(struct bnxt *bp,
1882                                  struct bnxt_cp_ring_info *cpr,
1883                                  u32 *raw_cons, u8 *event)
1884 {
1885         u32 tmp_raw_cons = *raw_cons;
1886         struct rx_cmp_ext *rxcmp1;
1887         struct rx_cmp *rxcmp;
1888         u16 cp_cons;
1889         u8 cmp_type;
1890
1891         cp_cons = RING_CMP(tmp_raw_cons);
1892         rxcmp = (struct rx_cmp *)
1893                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1894
1895         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1896         cp_cons = RING_CMP(tmp_raw_cons);
1897         rxcmp1 = (struct rx_cmp_ext *)
1898                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1899
1900         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1901                 return -EBUSY;
1902
1903         cmp_type = RX_CMP_TYPE(rxcmp);
1904         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1905                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1906                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1907         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1908                 struct rx_tpa_end_cmp_ext *tpa_end1;
1909
1910                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1911                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1912                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1913         }
1914         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1915 }
1916
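/* Read one of the firmware health/recovery registers.  The register
 * descriptor encodes an access type and offset: a PCI config space dword, a
 * GRC address (already remapped into a BAR0 window via mapped_regs), or a
 * direct BAR0/BAR1 offset.  The reset-in-progress register is additionally
 * masked with fw_reset_inprog_reg_mask.
 */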
1917 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1918 {
1919         struct bnxt_fw_health *fw_health = bp->fw_health;
1920         u32 reg = fw_health->regs[reg_idx];
1921         u32 reg_type, reg_off, val = 0;
1922
1923         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1924         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1925         switch (reg_type) {
1926         case BNXT_FW_HEALTH_REG_TYPE_CFG:
1927                 pci_read_config_dword(bp->pdev, reg_off, &val);
1928                 break;
1929         case BNXT_FW_HEALTH_REG_TYPE_GRC:
1930                 reg_off = fw_health->mapped_regs[reg_idx];
1931                 fallthrough;
1932         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1933                 val = readl(bp->bar0 + reg_off);
1934                 break;
1935         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1936                 val = readl(bp->bar1 + reg_off);
1937                 break;
1938         }
1939         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1940                 val &= fw_health->fw_reset_inprog_reg_mask;
1941         return val;
1942 }
1943
1944 #define BNXT_GET_EVENT_PORT(data)       \
1945         ((data) &                       \
1946          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1947
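/* Decode a firmware async event completion.  Most events just set a bit in
 * bp->sp_event and schedule the slow-path workqueue: link speed/config/status
 * changes, PF driver unload, port module events for this PF's port, VF
 * configuration changes (silent reset on the VF) and firmware reset
 * notifications (recording the min/max wait times and whether the reset is
 * fatal).  The error recovery capability event instead initializes the
 * heartbeat polling state from the firmware health registers.  The
 * completion is always forwarded to ULPs via bnxt_ulp_async_events().
 */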
1948 static int bnxt_async_event_process(struct bnxt *bp,
1949                                     struct hwrm_async_event_cmpl *cmpl)
1950 {
1951         u16 event_id = le16_to_cpu(cmpl->event_id);
1952
1953         /* TODO CHIMP_FW: Define event id's for link change, error etc */
1954         switch (event_id) {
1955         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1956                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1957                 struct bnxt_link_info *link_info = &bp->link_info;
1958
1959                 if (BNXT_VF(bp))
1960                         goto async_event_process_exit;
1961
1962                 /* print unsupported speed warning in forced speed mode only */
1963                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1964                     (data1 & 0x20000)) {
1965                         u16 fw_speed = link_info->force_link_speed;
1966                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1967
1968                         if (speed != SPEED_UNKNOWN)
1969                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1970                                             speed);
1971                 }
1972                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1973         }
1974                 fallthrough;
1975         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1976         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
1977                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
1978                 fallthrough;
1979         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1980                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1981                 break;
1982         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1983                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1984                 break;
1985         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1986                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1987                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1988
1989                 if (BNXT_VF(bp))
1990                         break;
1991
1992                 if (bp->pf.port_id != port_id)
1993                         break;
1994
1995                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1996                 break;
1997         }
1998         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1999                 if (BNXT_PF(bp))
2000                         goto async_event_process_exit;
2001                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2002                 break;
2003         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2004                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2005
2006                 if (!bp->fw_health)
2007                         goto async_event_process_exit;
2008
2009                 bp->fw_reset_timestamp = jiffies;
2010                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2011                 if (!bp->fw_reset_min_dsecs)
2012                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2013                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2014                 if (!bp->fw_reset_max_dsecs)
2015                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2016                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2017                         netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2018                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2019                 } else {
2020                         netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2021                                     bp->fw_reset_max_dsecs * 100);
2022                 }
2023                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2024                 break;
2025         }
2026         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2027                 struct bnxt_fw_health *fw_health = bp->fw_health;
2028                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2029
2030                 if (!fw_health)
2031                         goto async_event_process_exit;
2032
2033                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2034                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2035                 if (!fw_health->enabled)
2036                         break;
2037
2038                 if (netif_msg_drv(bp))
2039                         netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2040                                     fw_health->enabled, fw_health->master,
2041                                     bnxt_fw_health_readl(bp,
2042                                                          BNXT_FW_RESET_CNT_REG),
2043                                     bnxt_fw_health_readl(bp,
2044                                                          BNXT_FW_HEALTH_REG));
2045                 fw_health->tmr_multiplier =
2046                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2047                                      bp->current_interval * 10);
2048                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2049                 fw_health->last_fw_heartbeat =
2050                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2051                 fw_health->last_fw_reset_cnt =
2052                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2053                 goto async_event_process_exit;
2054         }
2055         default:
2056                 goto async_event_process_exit;
2057         }
2058         bnxt_queue_sp_work(bp);
2059 async_event_process_exit:
2060         bnxt_ulp_async_events(bp, cmpl);
2061         return 0;
2062 }
2063
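/* Handle HWRM-related entries found on the completion ring: HWRM_DONE
 * completions are matched against the expected interrupt sequence id,
 * forwarded VF requests mark the originating VF in vf_event_bmap and kick
 * the slow-path task, and async event completions are passed on to
 * bnxt_async_event_process().
 */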
2064 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2065 {
2066         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2067         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2068         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2069                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2070
2071         switch (cmpl_type) {
2072         case CMPL_BASE_TYPE_HWRM_DONE:
2073                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2074                 if (seq_id == bp->hwrm_intr_seq_id)
2075                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2076                 else
2077                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2078                 break;
2079
2080         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2081                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2082
2083                 if ((vf_id < bp->pf.first_vf_id) ||
2084                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2085                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2086                                    vf_id);
2087                         return -EINVAL;
2088                 }
2089
2090                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2091                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2092                 bnxt_queue_sp_work(bp);
2093                 break;
2094
2095         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2096                 bnxt_async_event_process(bp,
2097                                          (struct hwrm_async_event_cmpl *)txcmp);
2098                 fallthrough;
2099         default:
2100                 break;
2101         }
2102
2103         return 0;
2104 }
2105
2106 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2107 {
2108         struct bnxt_napi *bnapi = dev_instance;
2109         struct bnxt *bp = bnapi->bp;
2110         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2111         u32 cons = RING_CMP(cpr->cp_raw_cons);
2112
2113         cpr->event_ctr++;
2114         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2115         napi_schedule(&bnapi->napi);
2116         return IRQ_HANDLED;
2117 }
2118
2119 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2120 {
2121         u32 raw_cons = cpr->cp_raw_cons;
2122         u16 cons = RING_CMP(raw_cons);
2123         struct tx_cmp *txcmp;
2124
2125         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2126
2127         return TX_CMP_VALID(txcmp, raw_cons);
2128 }
2129
2130 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2131 {
2132         struct bnxt_napi *bnapi = dev_instance;
2133         struct bnxt *bp = bnapi->bp;
2134         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2135         u32 cons = RING_CMP(cpr->cp_raw_cons);
2136         u32 int_status;
2137
2138         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2139
2140         if (!bnxt_has_work(bp, cpr)) {
2141                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2142                 /* return if erroneous interrupt */
2143                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2144                         return IRQ_NONE;
2145         }
2146
2147         /* disable ring IRQ */
2148         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2149
2150         /* Return here if interrupt is shared and is disabled. */
2151         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2152                 return IRQ_HANDLED;
2153
2154         napi_schedule(&bnapi->napi);
2155         return IRQ_HANDLED;
2156 }
2157
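/* Core completion-ring poll loop shared by all NAPI variants.  TX
 * completions are only counted here; the actual reclaim is deferred to
 * bnapi->tx_int() in __bnxt_poll_work_done().  RX-type completions are
 * processed by bnxt_rx_pkt(), or force-discarded when polling with a zero
 * budget (netpoll).  -ENOMEM results still count against the budget so the
 * loop cannot spin forever on allocation failures, and -EBUSY (partial
 * completion) ends the loop.  HWRM/async entries go to bnxt_hwrm_handler().
 * The consumer index, TX completion count and pending doorbell work are
 * recorded on the cpr/bnapi for the *_poll_work_done() stage.
 */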
2158 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2159                             int budget)
2160 {
2161         struct bnxt_napi *bnapi = cpr->bnapi;
2162         u32 raw_cons = cpr->cp_raw_cons;
2163         u32 cons;
2164         int tx_pkts = 0;
2165         int rx_pkts = 0;
2166         u8 event = 0;
2167         struct tx_cmp *txcmp;
2168
2169         cpr->has_more_work = 0;
2170         cpr->had_work_done = 1;
2171         while (1) {
2172                 int rc;
2173
2174                 cons = RING_CMP(raw_cons);
2175                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2176
2177                 if (!TX_CMP_VALID(txcmp, raw_cons))
2178                         break;
2179
2180                 /* The valid test of the entry must be done first before
2181                  * reading any further.
2182                  */
2183                 dma_rmb();
2184                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2185                         tx_pkts++;
2186                         /* return full budget so NAPI will complete. */
2187                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2188                                 rx_pkts = budget;
2189                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2190                                 if (budget)
2191                                         cpr->has_more_work = 1;
2192                                 break;
2193                         }
2194                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2195                         if (likely(budget))
2196                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2197                         else
2198                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2199                                                            &event);
2200                         if (likely(rc >= 0))
2201                                 rx_pkts += rc;
2202                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2203                          * the NAPI budget.  Otherwise, we may potentially loop
2204                          * here forever if we consistently cannot allocate
2205                          * buffers.
2206                          */
2207                         else if (rc == -ENOMEM && budget)
2208                                 rx_pkts++;
2209                         else if (rc == -EBUSY)  /* partial completion */
2210                                 break;
2211                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2212                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2213                                     (TX_CMP_TYPE(txcmp) ==
2214                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2215                                     (TX_CMP_TYPE(txcmp) ==
2216                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2217                         bnxt_hwrm_handler(bp, txcmp);
2218                 }
2219                 raw_cons = NEXT_RAW_CMP(raw_cons);
2220
2221                 if (rx_pkts && rx_pkts == budget) {
2222                         cpr->has_more_work = 1;
2223                         break;
2224                 }
2225         }
2226
2227         if (event & BNXT_REDIRECT_EVENT)
2228                 xdp_do_flush_map();
2229
2230         if (event & BNXT_TX_EVENT) {
2231                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2232                 u16 prod = txr->tx_prod;
2233
2234                 /* Sync BD data before updating doorbell */
2235                 wmb();
2236
2237                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2238         }
2239
2240         cpr->cp_raw_cons = raw_cons;
2241         bnapi->tx_pkts += tx_pkts;
2242         bnapi->events |= event;
2243         return rx_pkts;
2244 }
2245
2246 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2247 {
2248         if (bnapi->tx_pkts) {
2249                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2250                 bnapi->tx_pkts = 0;
2251         }
2252
2253         if (bnapi->events & BNXT_RX_EVENT) {
2254                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2255
2256                 if (bnapi->events & BNXT_AGG_EVENT)
2257                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2258                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2259         }
2260         bnapi->events = 0;
2261 }
2262
2263 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2264                           int budget)
2265 {
2266         struct bnxt_napi *bnapi = cpr->bnapi;
2267         int rx_pkts;
2268
2269         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2270
2271         /* ACK completion ring before freeing tx ring and producing new
2272          * buffers in rx/agg rings to prevent overflowing the completion
2273          * ring.
2274          */
2275         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2276
2277         __bnxt_poll_work_done(bp, bnapi);
2278         return rx_pkts;
2279 }
2280
2281 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2282 {
2283         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2284         struct bnxt *bp = bnapi->bp;
2285         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2286         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2287         struct tx_cmp *txcmp;
2288         struct rx_cmp_ext *rxcmp1;
2289         u32 cp_cons, tmp_raw_cons;
2290         u32 raw_cons = cpr->cp_raw_cons;
2291         u32 rx_pkts = 0;
2292         u8 event = 0;
2293
2294         while (1) {
2295                 int rc;
2296
2297                 cp_cons = RING_CMP(raw_cons);
2298                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2299
2300                 if (!TX_CMP_VALID(txcmp, raw_cons))
2301                         break;
2302
2303                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2304                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2305                         cp_cons = RING_CMP(tmp_raw_cons);
2306                         rxcmp1 = (struct rx_cmp_ext *)
2307                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2308
2309                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2310                                 break;
2311
2312                         /* force an error to recycle the buffer */
2313                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2314                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2315
2316                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2317                         if (likely(rc == -EIO) && budget)
2318                                 rx_pkts++;
2319                         else if (rc == -EBUSY)  /* partial completion */
2320                                 break;
2321                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2322                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2323                         bnxt_hwrm_handler(bp, txcmp);
2324                 } else {
2325                         netdev_err(bp->dev,
2326                                    "Invalid completion received on special ring\n");
2327                 }
2328                 raw_cons = NEXT_RAW_CMP(raw_cons);
2329
2330                 if (rx_pkts == budget)
2331                         break;
2332         }
2333
2334         cpr->cp_raw_cons = raw_cons;
2335         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2336         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2337
2338         if (event & BNXT_AGG_EVENT)
2339                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2340
2341         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2342                 napi_complete_done(napi, rx_pkts);
2343                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2344         }
2345         return rx_pkts;
2346 }
2347
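/* NAPI poll routine for legacy (non-P5) completion rings: keep calling
 * bnxt_poll_work() until the budget is exhausted or no work is left, then
 * complete NAPI and re-arm the completion ring.  When dynamic interrupt
 * moderation (DIM) is enabled, feed the event/packet/byte counters to
 * net_dim() so the coalescing parameters can be adjusted.
 */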
2348 static int bnxt_poll(struct napi_struct *napi, int budget)
2349 {
2350         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2351         struct bnxt *bp = bnapi->bp;
2352         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2353         int work_done = 0;
2354
2355         while (1) {
2356                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2357
2358                 if (work_done >= budget) {
2359                         if (!budget)
2360                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2361                         break;
2362                 }
2363
2364                 if (!bnxt_has_work(bp, cpr)) {
2365                         if (napi_complete_done(napi, work_done))
2366                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2367                         break;
2368                 }
2369         }
2370         if (bp->flags & BNXT_FLAG_DIM) {
2371                 struct dim_sample dim_sample = {};
2372
2373                 dim_update_sample(cpr->event_ctr,
2374                                   cpr->rx_packets,
2375                                   cpr->rx_bytes,
2376                                   &dim_sample);
2377                 net_dim(&cpr->dim, dim_sample);
2378         }
2379         return work_done;
2380 }
2381
2382 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2383 {
2384         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2385         int i, work_done = 0;
2386
2387         for (i = 0; i < 2; i++) {
2388                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2389
2390                 if (cpr2) {
2391                         work_done += __bnxt_poll_work(bp, cpr2,
2392                                                       budget - work_done);
2393                         cpr->has_more_work |= cpr2->has_more_work;
2394                 }
2395         }
2396         return work_done;
2397 }
2398
2399 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2400                                  u64 dbr_type)
2401 {
2402         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2403         int i;
2404
2405         for (i = 0; i < 2; i++) {
2406                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2407                 struct bnxt_db_info *db;
2408
2409                 if (cpr2 && cpr2->had_work_done) {
2410                         db = &cpr2->cp_db;
2411                         writeq(db->db_key64 | dbr_type |
2412                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2413                         cpr2->had_work_done = 0;
2414                 }
2415         }
2416         __bnxt_poll_work_done(bp, bnapi);
2417 }
2418
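/* NAPI poll routine for P5 (57500-series) chips.  Each MSI-X vector has a
 * notification queue (NQ) whose entries point at one of the per-ring
 * completion queues in cpr->cp_ring_arr.  Work left over from the previous
 * poll is serviced first, then NQ entries are drained and the referenced
 * CQs are polled with __bnxt_poll_work(); other NQ entries go to
 * bnxt_hwrm_handler().  CQ doorbells are written with ARMALL once all work
 * is done, or with the plain CQ type when more work remains, followed by
 * the NQ doorbell.
 */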
2419 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2420 {
2421         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2422         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2423         u32 raw_cons = cpr->cp_raw_cons;
2424         struct bnxt *bp = bnapi->bp;
2425         struct nqe_cn *nqcmp;
2426         int work_done = 0;
2427         u32 cons;
2428
2429         if (cpr->has_more_work) {
2430                 cpr->has_more_work = 0;
2431                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2432         }
2433         while (1) {
2434                 cons = RING_CMP(raw_cons);
2435                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2436
2437                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2438                         if (cpr->has_more_work)
2439                                 break;
2440
2441                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2442                         cpr->cp_raw_cons = raw_cons;
2443                         if (napi_complete_done(napi, work_done))
2444                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2445                                                   cpr->cp_raw_cons);
2446                         return work_done;
2447                 }
2448
2449                 /* The valid test of the entry must be done first before
2450                  * reading any further.
2451                  */
2452                 dma_rmb();
2453
2454                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2455                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2456                         struct bnxt_cp_ring_info *cpr2;
2457
2458                         cpr2 = cpr->cp_ring_arr[idx];
2459                         work_done += __bnxt_poll_work(bp, cpr2,
2460                                                       budget - work_done);
2461                         cpr->has_more_work |= cpr2->has_more_work;
2462                 } else {
2463                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2464                 }
2465                 raw_cons = NEXT_RAW_CMP(raw_cons);
2466         }
2467         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2468         if (raw_cons != cpr->cp_raw_cons) {
2469                 cpr->cp_raw_cons = raw_cons;
2470                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2471         }
2472         return work_done;
2473 }
2474
2475 static void bnxt_free_tx_skbs(struct bnxt *bp)
2476 {
2477         int i, max_idx;
2478         struct pci_dev *pdev = bp->pdev;
2479
2480         if (!bp->tx_ring)
2481                 return;
2482
2483         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2484         for (i = 0; i < bp->tx_nr_rings; i++) {
2485                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2486                 int j;
2487
2488                 for (j = 0; j < max_idx;) {
2489                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2490                         struct sk_buff *skb;
2491                         int k, last;
2492
2493                         if (i < bp->tx_nr_rings_xdp &&
2494                             tx_buf->action == XDP_REDIRECT) {
2495                                 dma_unmap_single(&pdev->dev,
2496                                         dma_unmap_addr(tx_buf, mapping),
2497                                         dma_unmap_len(tx_buf, len),
2498                                         PCI_DMA_TODEVICE);
2499                                 xdp_return_frame(tx_buf->xdpf);
2500                                 tx_buf->action = 0;
2501                                 tx_buf->xdpf = NULL;
2502                                 j++;
2503                                 continue;
2504                         }
2505
2506                         skb = tx_buf->skb;
2507                         if (!skb) {
2508                                 j++;
2509                                 continue;
2510                         }
2511
2512                         tx_buf->skb = NULL;
2513
2514                         if (tx_buf->is_push) {
2515                                 dev_kfree_skb(skb);
2516                                 j += 2;
2517                                 continue;
2518                         }
2519
2520                         dma_unmap_single(&pdev->dev,
2521                                          dma_unmap_addr(tx_buf, mapping),
2522                                          skb_headlen(skb),
2523                                          PCI_DMA_TODEVICE);
2524
2525                         last = tx_buf->nr_frags;
2526                         j += 2;
2527                         for (k = 0; k < last; k++, j++) {
2528                                 int ring_idx = j & bp->tx_ring_mask;
2529                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2530
2531                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2532                                 dma_unmap_page(
2533                                         &pdev->dev,
2534                                         dma_unmap_addr(tx_buf, mapping),
2535                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2536                         }
2537                         dev_kfree_skb(skb);
2538                 }
2539                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2540         }
2541 }
2542
2543 static void bnxt_free_rx_skbs(struct bnxt *bp)
2544 {
2545         int i, max_idx, max_agg_idx;
2546         struct pci_dev *pdev = bp->pdev;
2547
2548         if (!bp->rx_ring)
2549                 return;
2550
2551         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2552         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2553         for (i = 0; i < bp->rx_nr_rings; i++) {
2554                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2555                 struct bnxt_tpa_idx_map *map;
2556                 int j;
2557
2558                 if (rxr->rx_tpa) {
2559                         for (j = 0; j < bp->max_tpa; j++) {
2560                                 struct bnxt_tpa_info *tpa_info =
2561                                                         &rxr->rx_tpa[j];
2562                                 u8 *data = tpa_info->data;
2563
2564                                 if (!data)
2565                                         continue;
2566
2567                                 dma_unmap_single_attrs(&pdev->dev,
2568                                                        tpa_info->mapping,
2569                                                        bp->rx_buf_use_size,
2570                                                        bp->rx_dir,
2571                                                        DMA_ATTR_WEAK_ORDERING);
2572
2573                                 tpa_info->data = NULL;
2574
2575                                 kfree(data);
2576                         }
2577                 }
2578
2579                 for (j = 0; j < max_idx; j++) {
2580                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2581                         dma_addr_t mapping = rx_buf->mapping;
2582                         void *data = rx_buf->data;
2583
2584                         if (!data)
2585                                 continue;
2586
2587                         rx_buf->data = NULL;
2588
2589                         if (BNXT_RX_PAGE_MODE(bp)) {
2590                                 mapping -= bp->rx_dma_offset;
2591                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2592                                                      PAGE_SIZE, bp->rx_dir,
2593                                                      DMA_ATTR_WEAK_ORDERING);
2594                                 page_pool_recycle_direct(rxr->page_pool, data);
2595                         } else {
2596                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2597                                                        bp->rx_buf_use_size,
2598                                                        bp->rx_dir,
2599                                                        DMA_ATTR_WEAK_ORDERING);
2600                                 kfree(data);
2601                         }
2602                 }
2603
2604                 for (j = 0; j < max_agg_idx; j++) {
2605                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2606                                 &rxr->rx_agg_ring[j];
2607                         struct page *page = rx_agg_buf->page;
2608
2609                         if (!page)
2610                                 continue;
2611
2612                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2613                                              BNXT_RX_PAGE_SIZE,
2614                                              PCI_DMA_FROMDEVICE,
2615                                              DMA_ATTR_WEAK_ORDERING);
2616
2617                         rx_agg_buf->page = NULL;
2618                         __clear_bit(j, rxr->rx_agg_bmap);
2619
2620                         __free_page(page);
2621                 }
2622                 if (rxr->rx_page) {
2623                         __free_page(rxr->rx_page);
2624                         rxr->rx_page = NULL;
2625                 }
2626                 map = rxr->rx_tpa_idx_map;
2627                 if (map)
2628                         memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2629         }
2630 }
2631
2632 static void bnxt_free_skbs(struct bnxt *bp)
2633 {
2634         bnxt_free_tx_skbs(bp);
2635         bnxt_free_rx_skbs(bp);
2636 }
2637
2638 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2639 {
2640         struct pci_dev *pdev = bp->pdev;
2641         int i;
2642
2643         for (i = 0; i < rmem->nr_pages; i++) {
2644                 if (!rmem->pg_arr[i])
2645                         continue;
2646
2647                 dma_free_coherent(&pdev->dev, rmem->page_size,
2648                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2649
2650                 rmem->pg_arr[i] = NULL;
2651         }
2652         if (rmem->pg_tbl) {
2653                 size_t pg_tbl_size = rmem->nr_pages * 8;
2654
2655                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2656                         pg_tbl_size = rmem->page_size;
2657                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2658                                   rmem->pg_tbl, rmem->pg_tbl_map);
2659                 rmem->pg_tbl = NULL;
2660         }
2661         if (rmem->vmem_size && *rmem->vmem) {
2662                 vfree(*rmem->vmem);
2663                 *rmem->vmem = NULL;
2664         }
2665 }
2666
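/* Allocate the backing store described by a bnxt_ring_mem_info: nr_pages
 * DMA-coherent chunks of page_size bytes (optionally pre-filled with
 * init_val), plus a page table for multi-page or multi-level rings whose
 * entries carry the PTU PTE VALID/NEXT_TO_LAST/LAST bits when the ring PTE
 * flag is set.  An optional vmalloc'ed software context array (vmem) is
 * allocated alongside.
 */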
2667 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2668 {
2669         struct pci_dev *pdev = bp->pdev;
2670         u64 valid_bit = 0;
2671         int i;
2672
2673         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2674                 valid_bit = PTU_PTE_VALID;
2675         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2676                 size_t pg_tbl_size = rmem->nr_pages * 8;
2677
2678                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2679                         pg_tbl_size = rmem->page_size;
2680                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2681                                                   &rmem->pg_tbl_map,
2682                                                   GFP_KERNEL);
2683                 if (!rmem->pg_tbl)
2684                         return -ENOMEM;
2685         }
2686
2687         for (i = 0; i < rmem->nr_pages; i++) {
2688                 u64 extra_bits = valid_bit;
2689
2690                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2691                                                      rmem->page_size,
2692                                                      &rmem->dma_arr[i],
2693                                                      GFP_KERNEL);
2694                 if (!rmem->pg_arr[i])
2695                         return -ENOMEM;
2696
2697                 if (rmem->init_val)
2698                         memset(rmem->pg_arr[i], rmem->init_val,
2699                                rmem->page_size);
2700                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2701                         if (i == rmem->nr_pages - 2 &&
2702                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2703                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2704                         else if (i == rmem->nr_pages - 1 &&
2705                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2706                                 extra_bits |= PTU_PTE_LAST;
2707                         rmem->pg_tbl[i] =
2708                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2709                 }
2710         }
2711
2712         if (rmem->vmem_size) {
2713                 *rmem->vmem = vzalloc(rmem->vmem_size);
2714                 if (!(*rmem->vmem))
2715                         return -ENOMEM;
2716         }
2717         return 0;
2718 }
2719
2720 static void bnxt_free_tpa_info(struct bnxt *bp)
2721 {
2722         int i;
2723
2724         for (i = 0; i < bp->rx_nr_rings; i++) {
2725                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2726
2727                 kfree(rxr->rx_tpa_idx_map);
2728                 rxr->rx_tpa_idx_map = NULL;
2729                 if (rxr->rx_tpa) {
2730                         kfree(rxr->rx_tpa[0].agg_arr);
2731                         rxr->rx_tpa[0].agg_arr = NULL;
2732                 }
2733                 kfree(rxr->rx_tpa);
2734                 rxr->rx_tpa = NULL;
2735         }
2736 }
2737
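/* Allocate per-ring TPA (LRO/GRO_HW) state.  bp->max_tpa defaults to MAX_TPA;
 * on P5 chips it comes from the firmware-reported max_tpa_v2 (but at least
 * MAX_TPA_P5), and a zero max_tpa_v2 means TPA is not supported so nothing
 * more is allocated.  On P5 the aggregation completion entries for all TPA
 * slots of a ring are carved out of one array: slot 0 owns the kcalloc'ed
 * base pointer and slot j points at agg_arr + j * MAX_SKB_FRAGS.  The
 * rx_tpa_idx_map is used to map hardware aggregation IDs back to TPA slots.
 */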
2738 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2739 {
2740         int i, j, total_aggs = 0;
2741
2742         bp->max_tpa = MAX_TPA;
2743         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2744                 if (!bp->max_tpa_v2)
2745                         return 0;
2746                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2747                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2748         }
2749
2750         for (i = 0; i < bp->rx_nr_rings; i++) {
2751                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2752                 struct rx_agg_cmp *agg;
2753
2754                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2755                                       GFP_KERNEL);
2756                 if (!rxr->rx_tpa)
2757                         return -ENOMEM;
2758
2759                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2760                         continue;
2761                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2762                 rxr->rx_tpa[0].agg_arr = agg;
2763                 if (!agg)
2764                         return -ENOMEM;
2765                 for (j = 1; j < bp->max_tpa; j++)
2766                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2767                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2768                                               GFP_KERNEL);
2769                 if (!rxr->rx_tpa_idx_map)
2770                         return -ENOMEM;
2771         }
2772         return 0;
2773 }
2774
2775 static void bnxt_free_rx_rings(struct bnxt *bp)
2776 {
2777         int i;
2778
2779         if (!bp->rx_ring)
2780                 return;
2781
2782         bnxt_free_tpa_info(bp);
2783         for (i = 0; i < bp->rx_nr_rings; i++) {
2784                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2785                 struct bnxt_ring_struct *ring;
2786
2787                 if (rxr->xdp_prog)
2788                         bpf_prog_put(rxr->xdp_prog);
2789
2790                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2791                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2792
2793                 page_pool_destroy(rxr->page_pool);
2794                 rxr->page_pool = NULL;
2795
2796                 kfree(rxr->rx_agg_bmap);
2797                 rxr->rx_agg_bmap = NULL;
2798
2799                 ring = &rxr->rx_ring_struct;
2800                 bnxt_free_ring(bp, &ring->ring_mem);
2801
2802                 ring = &rxr->rx_agg_ring_struct;
2803                 bnxt_free_ring(bp, &ring->ring_mem);
2804         }
2805 }
2806
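/* Each RX ring gets its own page_pool, sized to the RX ring and tied to the
 * PCI device's NUMA node.  The pool is registered as the MEM_TYPE_PAGE_POOL
 * memory model of the ring's xdp_rxq in bnxt_alloc_rx_rings() below, so
 * pages released through the XDP path are recycled back into this pool.
 */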
2807 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2808                                    struct bnxt_rx_ring_info *rxr)
2809 {
2810         struct page_pool_params pp = { 0 };
2811
2812         pp.pool_size = bp->rx_ring_size;
2813         pp.nid = dev_to_node(&bp->pdev->dev);
2814         pp.dev = &bp->pdev->dev;
2815         pp.dma_dir = DMA_BIDIRECTIONAL;
2816
2817         rxr->page_pool = page_pool_create(&pp);
2818         if (IS_ERR(rxr->page_pool)) {
2819                 int err = PTR_ERR(rxr->page_pool);
2820
2821                 rxr->page_pool = NULL;
2822                 return err;
2823         }
2824         return 0;
2825 }
2826
2827 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2828 {
2829         int i, rc = 0, agg_rings = 0;
2830
2831         if (!bp->rx_ring)
2832                 return -ENOMEM;
2833
2834         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2835                 agg_rings = 1;
2836
2837         for (i = 0; i < bp->rx_nr_rings; i++) {
2838                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2839                 struct bnxt_ring_struct *ring;
2840
2841                 ring = &rxr->rx_ring_struct;
2842
2843                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2844                 if (rc)
2845                         return rc;
2846
2847                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2848                 if (rc < 0)
2849                         return rc;
2850
2851                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2852                                                 MEM_TYPE_PAGE_POOL,
2853                                                 rxr->page_pool);
2854                 if (rc) {
2855                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2856                         return rc;
2857                 }
2858
2859                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2860                 if (rc)
2861                         return rc;
2862
2863                 ring->grp_idx = i;
2864                 if (agg_rings) {
2865                         u16 mem_size;
2866
2867                         ring = &rxr->rx_agg_ring_struct;
2868                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2869                         if (rc)
2870                                 return rc;
2871
2872                         ring->grp_idx = i;
2873                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2874                         mem_size = rxr->rx_agg_bmap_size / 8;
2875                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2876                         if (!rxr->rx_agg_bmap)
2877                                 return -ENOMEM;
2878                 }
2879         }
2880         if (bp->flags & BNXT_FLAG_TPA)
2881                 rc = bnxt_alloc_tpa_info(bp);
2882         return rc;
2883 }
2884
2885 static void bnxt_free_tx_rings(struct bnxt *bp)
2886 {
2887         int i;
2888         struct pci_dev *pdev = bp->pdev;
2889
2890         if (!bp->tx_ring)
2891                 return;
2892
2893         for (i = 0; i < bp->tx_nr_rings; i++) {
2894                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2895                 struct bnxt_ring_struct *ring;
2896
2897                 if (txr->tx_push) {
2898                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2899                                           txr->tx_push, txr->tx_push_mapping);
2900                         txr->tx_push = NULL;
2901                 }
2902
2903                 ring = &txr->tx_ring_struct;
2904
2905                 bnxt_free_ring(bp, &ring->ring_mem);
2906         }
2907 }
2908
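/* Allocate TX descriptor rings.  If the firmware advertises a TX push
 * threshold, each ring also gets a small DMA-coherent "push" buffer of
 * L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + tx_push_thresh) bytes; push is
 * only used when that size fits in 256 bytes, otherwise it is disabled.
 * txr->data_mapping points just past the BDs inside that buffer.  Rings are
 * then assigned to hardware queues per traffic class via bp->tc_to_qidx[],
 * with the first tx_nr_rings_xdp rings (XDP_TX rings) not advancing the TC
 * index.
 */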
2909 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2910 {
2911         int i, j, rc;
2912         struct pci_dev *pdev = bp->pdev;
2913
2914         bp->tx_push_size = 0;
2915         if (bp->tx_push_thresh) {
2916                 int push_size;
2917
2918                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2919                                         bp->tx_push_thresh);
2920
2921                 if (push_size > 256) {
2922                         push_size = 0;
2923                         bp->tx_push_thresh = 0;
2924                 }
2925
2926                 bp->tx_push_size = push_size;
2927         }
2928
2929         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2930                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2931                 struct bnxt_ring_struct *ring;
2932                 u8 qidx;
2933
2934                 ring = &txr->tx_ring_struct;
2935
2936                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2937                 if (rc)
2938                         return rc;
2939
2940                 ring->grp_idx = txr->bnapi->index;
2941                 if (bp->tx_push_size) {
2942                         dma_addr_t mapping;
2943
2944                         /* One pre-allocated DMA buffer to back up
2945                          * the TX push operation
2946                          */
2947                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2948                                                 bp->tx_push_size,
2949                                                 &txr->tx_push_mapping,
2950                                                 GFP_KERNEL);
2951
2952                         if (!txr->tx_push)
2953                                 return -ENOMEM;
2954
2955                         mapping = txr->tx_push_mapping +
2956                                 sizeof(struct tx_push_bd);
2957                         txr->data_mapping = cpu_to_le64(mapping);
2958                 }
2959                 qidx = bp->tc_to_qidx[j];
2960                 ring->queue_id = bp->q_info[qidx].queue_id;
2961                 if (i < bp->tx_nr_rings_xdp)
2962                         continue;
2963                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2964                         j++;
2965         }
2966         return 0;
2967 }
2968
2969 static void bnxt_free_cp_rings(struct bnxt *bp)
2970 {
2971         int i;
2972
2973         if (!bp->bnapi)
2974                 return;
2975
2976         for (i = 0; i < bp->cp_nr_rings; i++) {
2977                 struct bnxt_napi *bnapi = bp->bnapi[i];
2978                 struct bnxt_cp_ring_info *cpr;
2979                 struct bnxt_ring_struct *ring;
2980                 int j;
2981
2982                 if (!bnapi)
2983                         continue;
2984
2985                 cpr = &bnapi->cp_ring;
2986                 ring = &cpr->cp_ring_struct;
2987
2988                 bnxt_free_ring(bp, &ring->ring_mem);
2989
2990                 for (j = 0; j < 2; j++) {
2991                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2992
2993                         if (cpr2) {
2994                                 ring = &cpr2->cp_ring_struct;
2995                                 bnxt_free_ring(bp, &ring->ring_mem);
2996                                 kfree(cpr2);
2997                                 cpr->cp_ring_arr[j] = NULL;
2998                         }
2999                 }
3000         }
3001 }
3002
3003 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3004 {
3005         struct bnxt_ring_mem_info *rmem;
3006         struct bnxt_ring_struct *ring;
3007         struct bnxt_cp_ring_info *cpr;
3008         int rc;
3009
3010         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3011         if (!cpr)
3012                 return NULL;
3013
3014         ring = &cpr->cp_ring_struct;
3015         rmem = &ring->ring_mem;
3016         rmem->nr_pages = bp->cp_nr_pages;
3017         rmem->page_size = HW_CMPD_RING_SIZE;
3018         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3019         rmem->dma_arr = cpr->cp_desc_mapping;
3020         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3021         rc = bnxt_alloc_ring(bp, rmem);
3022         if (rc) {
3023                 bnxt_free_ring(bp, rmem);
3024                 kfree(cpr);
3025                 cpr = NULL;
3026         }
3027         return cpr;
3028 }
3029
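/* Allocate completion ring memory for every NAPI instance.  The ring's
 * map_idx selects its MSI-X vector; when some vectors are reserved for the
 * ULP (RDMA) driver, rings at or above the ULP base vector are shifted up by
 * ulp_msix so they do not collide with it.  On P5 chips each NAPI owns a
 * notification queue plus up to two child completion rings allocated by
 * bnxt_alloc_cp_sub_ring(): one at BNXT_RX_HDL for its RX ring and one at
 * BNXT_TX_HDL for its TX ring (which of the two exist depends on whether RX
 * and TX share NAPIs).
 */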
3030 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3031 {
3032         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3033         int i, rc, ulp_base_vec, ulp_msix;
3034
3035         ulp_msix = bnxt_get_ulp_msix_num(bp);
3036         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3037         for (i = 0; i < bp->cp_nr_rings; i++) {
3038                 struct bnxt_napi *bnapi = bp->bnapi[i];
3039                 struct bnxt_cp_ring_info *cpr;
3040                 struct bnxt_ring_struct *ring;
3041
3042                 if (!bnapi)
3043                         continue;
3044
3045                 cpr = &bnapi->cp_ring;
3046                 cpr->bnapi = bnapi;
3047                 ring = &cpr->cp_ring_struct;
3048
3049                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3050                 if (rc)
3051                         return rc;
3052
3053                 if (ulp_msix && i >= ulp_base_vec)
3054                         ring->map_idx = i + ulp_msix;
3055                 else
3056                         ring->map_idx = i;
3057
3058                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3059                         continue;
3060
3061                 if (i < bp->rx_nr_rings) {
3062                         struct bnxt_cp_ring_info *cpr2 =
3063                                 bnxt_alloc_cp_sub_ring(bp);
3064
3065                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3066                         if (!cpr2)
3067                                 return -ENOMEM;
3068                         cpr2->bnapi = bnapi;
3069                 }
3070                 if ((sh && i < bp->tx_nr_rings) ||
3071                     (!sh && i >= bp->rx_nr_rings)) {
3072                         struct bnxt_cp_ring_info *cpr2 =
3073                                 bnxt_alloc_cp_sub_ring(bp);
3074
3075                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3076                         if (!cpr2)
3077                                 return -ENOMEM;
3078                         cpr2->bnapi = bnapi;
3079                 }
3080         }
3081         return 0;
3082 }
3083
3084 static void bnxt_init_ring_struct(struct bnxt *bp)
3085 {
3086         int i;
3087
3088         for (i = 0; i < bp->cp_nr_rings; i++) {
3089                 struct bnxt_napi *bnapi = bp->bnapi[i];
3090                 struct bnxt_ring_mem_info *rmem;
3091                 struct bnxt_cp_ring_info *cpr;
3092                 struct bnxt_rx_ring_info *rxr;
3093                 struct bnxt_tx_ring_info *txr;
3094                 struct bnxt_ring_struct *ring;
3095
3096                 if (!bnapi)
3097                         continue;
3098
3099                 cpr = &bnapi->cp_ring;
3100                 ring = &cpr->cp_ring_struct;
3101                 rmem = &ring->ring_mem;
3102                 rmem->nr_pages = bp->cp_nr_pages;
3103                 rmem->page_size = HW_CMPD_RING_SIZE;
3104                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3105                 rmem->dma_arr = cpr->cp_desc_mapping;
3106                 rmem->vmem_size = 0;
3107
3108                 rxr = bnapi->rx_ring;
3109                 if (!rxr)
3110                         goto skip_rx;
3111
3112                 ring = &rxr->rx_ring_struct;
3113                 rmem = &ring->ring_mem;
3114                 rmem->nr_pages = bp->rx_nr_pages;
3115                 rmem->page_size = HW_RXBD_RING_SIZE;
3116                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3117                 rmem->dma_arr = rxr->rx_desc_mapping;
3118                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3119                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3120
3121                 ring = &rxr->rx_agg_ring_struct;
3122                 rmem = &ring->ring_mem;
3123                 rmem->nr_pages = bp->rx_agg_nr_pages;
3124                 rmem->page_size = HW_RXBD_RING_SIZE;
3125                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3126                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3127                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3128                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3129
3130 skip_rx:
3131                 txr = bnapi->tx_ring;
3132                 if (!txr)
3133                         continue;
3134
3135                 ring = &txr->tx_ring_struct;
3136                 rmem = &ring->ring_mem;
3137                 rmem->nr_pages = bp->tx_nr_pages;
3138                 rmem->page_size = HW_RXBD_RING_SIZE;
3139                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3140                 rmem->dma_arr = txr->tx_desc_mapping;
3141                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3142                 rmem->vmem = (void **)&txr->tx_buf_ring;
3143         }
3144 }
3145
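/* Stamp every RX BD in the ring with the given length/flags/type word and
 * store the running producer index in rx_bd_opaque; the opaque value echoed
 * back in RX completions then identifies the software buffer slot.
 */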
3146 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3147 {
3148         int i;
3149         u32 prod;
3150         struct rx_bd **rx_buf_ring;
3151
3152         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3153         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3154                 int j;
3155                 struct rx_bd *rxbd;
3156
3157                 rxbd = rx_buf_ring[i];
3158                 if (!rxbd)
3159                         continue;
3160
3161                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3162                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3163                         rxbd->rx_bd_opaque = prod;
3164                 }
3165         }
3166 }
3167
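/* Initialize one RX ring: write the BD type/length template (SOP is set when
 * NET_IP_ALIGN == 2), take a reference on the XDP program in page mode, and
 * fill the ring with rx_ring_size data buffers.  With aggregation rings
 * enabled, the aggregation ring is filled with full pages (BNXT_RX_PAGE_SIZE
 * BDs) and, when TPA is enabled, one data buffer is pre-allocated for each of
 * the bp->max_tpa TPA slots.
 */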
3168 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3169 {
3170         struct net_device *dev = bp->dev;
3171         struct bnxt_rx_ring_info *rxr;
3172         struct bnxt_ring_struct *ring;
3173         u32 prod, type;
3174         int i;
3175
3176         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3177                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3178
3179         if (NET_IP_ALIGN == 2)
3180                 type |= RX_BD_FLAGS_SOP;
3181
3182         rxr = &bp->rx_ring[ring_nr];
3183         ring = &rxr->rx_ring_struct;
3184         bnxt_init_rxbd_pages(ring, type);
3185
3186         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3187                 bpf_prog_add(bp->xdp_prog, 1);
3188                 rxr->xdp_prog = bp->xdp_prog;
3189         }
3190         prod = rxr->rx_prod;
3191         for (i = 0; i < bp->rx_ring_size; i++) {
3192                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3193                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3194                                     ring_nr, i, bp->rx_ring_size);
3195                         break;
3196                 }
3197                 prod = NEXT_RX(prod);
3198         }
3199         rxr->rx_prod = prod;
3200         ring->fw_ring_id = INVALID_HW_RING_ID;
3201
3202         ring = &rxr->rx_agg_ring_struct;
3203         ring->fw_ring_id = INVALID_HW_RING_ID;
3204
3205         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3206                 return 0;
3207
3208         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3209                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3210
3211         bnxt_init_rxbd_pages(ring, type);
3212
3213         prod = rxr->rx_agg_prod;
3214         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3215                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3216                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3217                                     ring_nr, i, bp->rx_agg_ring_size);
3218                         break;
3219                 }
3220                 prod = NEXT_RX_AGG(prod);
3221         }
3222         rxr->rx_agg_prod = prod;
3223
3224         if (bp->flags & BNXT_FLAG_TPA) {
3225                 if (rxr->rx_tpa) {
3226                         u8 *data;
3227                         dma_addr_t mapping;
3228
3229                         for (i = 0; i < bp->max_tpa; i++) {
3230                                 data = __bnxt_alloc_rx_data(bp, &mapping,
3231                                                             GFP_KERNEL);
3232                                 if (!data)
3233                                         return -ENOMEM;
3234
3235                                 rxr->rx_tpa[i].data = data;
3236                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3237                                 rxr->rx_tpa[i].mapping = mapping;
3238                         }
3239                 } else {
3240                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3241                         return -ENOMEM;
3242                 }
3243         }
3244
3245         return 0;
3246 }
3247
3248 static void bnxt_init_cp_rings(struct bnxt *bp)
3249 {
3250         int i, j;
3251
3252         for (i = 0; i < bp->cp_nr_rings; i++) {
3253                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3254                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3255
3256                 ring->fw_ring_id = INVALID_HW_RING_ID;
3257                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3258                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3259                 for (j = 0; j < 2; j++) {
3260                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3261
3262                         if (!cpr2)
3263                                 continue;
3264
3265                         ring = &cpr2->cp_ring_struct;
3266                         ring->fw_ring_id = INVALID_HW_RING_ID;
3267                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3268                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3269                 }
3270         }
3271 }
3272
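/* In page (XDP) mode, received data starts XDP_PACKET_HEADROOM bytes into the
 * page (plus NET_IP_ALIGN for the skb view); otherwise the standard
 * NET_SKB_PAD / NET_IP_ALIGN offsets are used.
 */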
3273 static int bnxt_init_rx_rings(struct bnxt *bp)
3274 {
3275         int i, rc = 0;
3276
3277         if (BNXT_RX_PAGE_MODE(bp)) {
3278                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3279                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3280         } else {
3281                 bp->rx_offset = BNXT_RX_OFFSET;
3282                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3283         }
3284
3285         for (i = 0; i < bp->rx_nr_rings; i++) {
3286                 rc = bnxt_init_one_rx_ring(bp, i);
3287                 if (rc)
3288                         break;
3289         }
3290
3291         return rc;
3292 }
3293
3294 static int bnxt_init_tx_rings(struct bnxt *bp)
3295 {
3296         u16 i;
3297
3298         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3299                                    MAX_SKB_FRAGS + 1);
3300
3301         for (i = 0; i < bp->tx_nr_rings; i++) {
3302                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3303                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3304
3305                 ring->fw_ring_id = INVALID_HW_RING_ID;
3306         }
3307
3308         return 0;
3309 }
3310
3311 static void bnxt_free_ring_grps(struct bnxt *bp)
3312 {
3313         kfree(bp->grp_info);
3314         bp->grp_info = NULL;
3315 }
3316
3317 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3318 {
3319         int i;
3320
3321         if (irq_re_init) {
3322                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3323                                        sizeof(struct bnxt_ring_grp_info),
3324                                        GFP_KERNEL);
3325                 if (!bp->grp_info)
3326                         return -ENOMEM;
3327         }
3328         for (i = 0; i < bp->cp_nr_rings; i++) {
3329                 if (irq_re_init)
3330                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3331                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3332                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3333                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3334                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3335         }
3336         return 0;
3337 }
3338
3339 static void bnxt_free_vnics(struct bnxt *bp)
3340 {
3341         kfree(bp->vnic_info);
3342         bp->vnic_info = NULL;
3343         bp->nr_vnics = 0;
3344 }
3345
3346 static int bnxt_alloc_vnics(struct bnxt *bp)
3347 {
3348         int num_vnics = 1;
3349
3350 #ifdef CONFIG_RFS_ACCEL
3351         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3352                 num_vnics += bp->rx_nr_rings;
3353 #endif
3354
3355         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3356                 num_vnics++;
3357
3358         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3359                                 GFP_KERNEL);
3360         if (!bp->vnic_info)
3361                 return -ENOMEM;
3362
3363         bp->nr_vnics = num_vnics;
3364         return 0;
3365 }
3366
3367 static void bnxt_init_vnics(struct bnxt *bp)
3368 {
3369         int i;
3370
3371         for (i = 0; i < bp->nr_vnics; i++) {
3372                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3373                 int j;
3374
3375                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3376                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3377                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3378
3379                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3380
3381                 if (bp->vnic_info[i].rss_hash_key) {
3382                         if (i == 0)
3383                                 prandom_bytes(vnic->rss_hash_key,
3384                                               HW_HASH_KEY_SIZE);
3385                         else
3386                                 memcpy(vnic->rss_hash_key,
3387                                        bp->vnic_info[0].rss_hash_key,
3388                                        HW_HASH_KEY_SIZE);
3389                 }
3390         }
3391 }
3392
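/* Number of descriptor pages needed for a ring of @ring_size entries: the
 * quotient plus one (or 1 for very small rings), rounded up to the next power
 * of two so that the page count stays a power of two.  For example, a
 * 511-entry RX ring with 256 descriptors per 4K page (RX_DESC_CNT) needs 2
 * pages.
 */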
3393 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3394 {
3395         int pages;
3396
3397         pages = ring_size / desc_per_pg;
3398
3399         if (!pages)
3400                 return 1;
3401
3402         pages++;
3403
3404         while (pages & (pages - 1))
3405                 pages++;
3406
3407         return pages;
3408 }
3409
3410 void bnxt_set_tpa_flags(struct bnxt *bp)
3411 {
3412         bp->flags &= ~BNXT_FLAG_TPA;
3413         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3414                 return;
3415         if (bp->dev->features & NETIF_F_LRO)
3416                 bp->flags |= BNXT_FLAG_LRO;
3417         else if (bp->dev->features & NETIF_F_GRO_HW)
3418                 bp->flags |= BNXT_FLAG_GRO;
3419 }
3420
3421 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3422  * be set on entry.
3423  */
3424 void bnxt_set_ring_params(struct bnxt *bp)
3425 {
3426         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3427         u32 agg_factor = 0, agg_ring_size = 0;
3428
3429         /* 8 for CRC and VLAN */
3430         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3431
3432         rx_space = rx_size + NET_SKB_PAD +
3433                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3434
3435         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3436         ring_size = bp->rx_ring_size;
3437         bp->rx_agg_ring_size = 0;
3438         bp->rx_agg_nr_pages = 0;
3439
3440         if (bp->flags & BNXT_FLAG_TPA)
3441                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3442
3443         bp->flags &= ~BNXT_FLAG_JUMBO;
3444         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3445                 u32 jumbo_factor;
3446
3447                 bp->flags |= BNXT_FLAG_JUMBO;
3448                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3449                 if (jumbo_factor > agg_factor)
3450                         agg_factor = jumbo_factor;
3451         }
3452         agg_ring_size = ring_size * agg_factor;
3453
3454         if (agg_ring_size) {
3455                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3456                                                         RX_DESC_CNT);
3457                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3458                         u32 tmp = agg_ring_size;
3459
3460                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3461                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3462                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3463                                     tmp, agg_ring_size);
3464                 }
3465                 bp->rx_agg_ring_size = agg_ring_size;
3466                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3467                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3468                 rx_space = rx_size + NET_SKB_PAD +
3469                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3470         }
3471
3472         bp->rx_buf_use_size = rx_size;
3473         bp->rx_buf_size = rx_space;
3474
3475         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3476         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3477
3478         ring_size = bp->tx_ring_size;
3479         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3480         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3481
3482         max_rx_cmpl = bp->rx_ring_size;
3483         /* MAX TPA needs to be added because TPA_START completions are
3484          * immediately recycled, so the TPA completions are not bound by
3485          * the RX ring size.
3486          */
3487         if (bp->flags & BNXT_FLAG_TPA)
3488                 max_rx_cmpl += bp->max_tpa;
3489         /* RX and TPA completions are 32-byte, all others are 16-byte */
3490         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3491         bp->cp_ring_size = ring_size;
3492
3493         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3494         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3495                 bp->cp_nr_pages = MAX_CP_PAGES;
3496                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3497                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3498                             ring_size, bp->cp_ring_size);
3499         }
3500         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3501         bp->cp_ring_mask = bp->cp_bit - 1;
3502 }
3503
3504 /* Changing allocation mode of RX rings.
3505  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3506  */
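/* Page mode is used for XDP: the MTU is capped at BNXT_MAX_PAGE_MODE_MTU,
 * aggregation rings are disabled, RX buffers are mapped bidirectionally and
 * built into skbs with bnxt_rx_page_skb().  Leaving page mode restores the
 * normal buffer scheme and the full MTU range.
 */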
3507 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3508 {
3509         if (page_mode) {
3510                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3511                         return -EOPNOTSUPP;
3512                 bp->dev->max_mtu =
3513                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3514                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3515                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3516                 bp->rx_dir = DMA_BIDIRECTIONAL;
3517                 bp->rx_skb_func = bnxt_rx_page_skb;
3518                 /* Disable LRO or GRO_HW */
3519                 netdev_update_features(bp->dev);
3520         } else {
3521                 bp->dev->max_mtu = bp->max_mtu;
3522                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3523                 bp->rx_dir = DMA_FROM_DEVICE;
3524                 bp->rx_skb_func = bnxt_rx_skb;
3525         }
3526         return 0;
3527 }
3528
3529 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3530 {
3531         int i;
3532         struct bnxt_vnic_info *vnic;
3533         struct pci_dev *pdev = bp->pdev;
3534
3535         if (!bp->vnic_info)
3536                 return;
3537
3538         for (i = 0; i < bp->nr_vnics; i++) {
3539                 vnic = &bp->vnic_info[i];
3540
3541                 kfree(vnic->fw_grp_ids);
3542                 vnic->fw_grp_ids = NULL;
3543
3544                 kfree(vnic->uc_list);
3545                 vnic->uc_list = NULL;
3546
3547                 if (vnic->mc_list) {
3548                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3549                                           vnic->mc_list, vnic->mc_list_mapping);
3550                         vnic->mc_list = NULL;
3551                 }
3552
3553                 if (vnic->rss_table) {
3554                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3555                                           vnic->rss_table,
3556                                           vnic->rss_table_dma_addr);
3557                         vnic->rss_table = NULL;
3558                 }
3559
3560                 vnic->rss_hash_key = NULL;
3561                 vnic->flags = 0;
3562         }
3563 }
3564
3565 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3566 {
3567         int i, rc = 0, size;
3568         struct bnxt_vnic_info *vnic;
3569         struct pci_dev *pdev = bp->pdev;
3570         int max_rings;
3571
3572         for (i = 0; i < bp->nr_vnics; i++) {
3573                 vnic = &bp->vnic_info[i];
3574
3575                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3576                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3577
3578                         if (mem_size > 0) {
3579                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3580                                 if (!vnic->uc_list) {
3581                                         rc = -ENOMEM;
3582                                         goto out;
3583                                 }
3584                         }
3585                 }
3586
3587                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3588                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3589                         vnic->mc_list =
3590                                 dma_alloc_coherent(&pdev->dev,
3591                                                    vnic->mc_list_size,
3592                                                    &vnic->mc_list_mapping,
3593                                                    GFP_KERNEL);
3594                         if (!vnic->mc_list) {
3595                                 rc = -ENOMEM;
3596                                 goto out;
3597                         }
3598                 }
3599
3600                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3601                         goto vnic_skip_grps;
3602
3603                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3604                         max_rings = bp->rx_nr_rings;
3605                 else
3606                         max_rings = 1;
3607
3608                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3609                 if (!vnic->fw_grp_ids) {
3610                         rc = -ENOMEM;
3611                         goto out;
3612                 }
3613 vnic_skip_grps:
3614                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3615                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3616                         continue;
3617
3618                 /* Allocate rss table and hash key */
3619                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3620                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3621                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3622
3623                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3624                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3625                                                      vnic->rss_table_size,
3626                                                      &vnic->rss_table_dma_addr,
3627                                                      GFP_KERNEL);
3628                 if (!vnic->rss_table) {
3629                         rc = -ENOMEM;
3630                         goto out;
3631                 }
3632
3633                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3634                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3635         }
3636         return 0;
3637
3638 out:
3639         return rc;
3640 }
3641
3642 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3643 {
3644         struct pci_dev *pdev = bp->pdev;
3645
3646         if (bp->hwrm_cmd_resp_addr) {
3647                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3648                                   bp->hwrm_cmd_resp_dma_addr);
3649                 bp->hwrm_cmd_resp_addr = NULL;
3650         }
3651
3652         if (bp->hwrm_cmd_kong_resp_addr) {
3653                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3654                                   bp->hwrm_cmd_kong_resp_addr,
3655                                   bp->hwrm_cmd_kong_resp_dma_addr);
3656                 bp->hwrm_cmd_kong_resp_addr = NULL;
3657         }
3658 }
3659
3660 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3661 {
3662         struct pci_dev *pdev = bp->pdev;
3663
3664         if (bp->hwrm_cmd_kong_resp_addr)
3665                 return 0;
3666
3667         bp->hwrm_cmd_kong_resp_addr =
3668                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3669                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3670                                    GFP_KERNEL);
3671         if (!bp->hwrm_cmd_kong_resp_addr)
3672                 return -ENOMEM;
3673
3674         return 0;
3675 }
3676
3677 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3678 {
3679         struct pci_dev *pdev = bp->pdev;
3680
3681         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3682                                                    &bp->hwrm_cmd_resp_dma_addr,
3683                                                    GFP_KERNEL);
3684         if (!bp->hwrm_cmd_resp_addr)
3685                 return -ENOMEM;
3686
3687         return 0;
3688 }
3689
3690 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3691 {
3692         if (bp->hwrm_short_cmd_req_addr) {
3693                 struct pci_dev *pdev = bp->pdev;
3694
3695                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3696                                   bp->hwrm_short_cmd_req_addr,
3697                                   bp->hwrm_short_cmd_req_dma_addr);
3698                 bp->hwrm_short_cmd_req_addr = NULL;
3699         }
3700 }
3701
3702 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3703 {
3704         struct pci_dev *pdev = bp->pdev;
3705
3706         if (bp->hwrm_short_cmd_req_addr)
3707                 return 0;
3708
3709         bp->hwrm_short_cmd_req_addr =
3710                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3711                                    &bp->hwrm_short_cmd_req_dma_addr,
3712                                    GFP_KERNEL);
3713         if (!bp->hwrm_short_cmd_req_addr)
3714                 return -ENOMEM;
3715
3716         return 0;
3717 }
3718
3719 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3720 {
3721         kfree(stats->hw_masks);
3722         stats->hw_masks = NULL;
3723         kfree(stats->sw_stats);
3724         stats->sw_stats = NULL;
3725         if (stats->hw_stats) {
3726                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3727                                   stats->hw_stats_map);
3728                 stats->hw_stats = NULL;
3729         }
3730 }
3731
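/* A stats block consists of a DMA-coherent area (hw_stats) that the chip
 * writes counters into, a host buffer of the same size (sw_stats) for
 * accumulated values, and optionally hw_masks holding each counter's rollover
 * mask.
 */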
3732 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3733                                 bool alloc_masks)
3734 {
3735         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3736                                              &stats->hw_stats_map, GFP_KERNEL);
3737         if (!stats->hw_stats)
3738                 return -ENOMEM;
3739
3740         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3741         if (!stats->sw_stats)
3742                 goto stats_mem_err;
3743
3744         if (alloc_masks) {
3745                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3746                 if (!stats->hw_masks)
3747                         goto stats_mem_err;
3748         }
3749         return 0;
3750
3751 stats_mem_err:
3752         bnxt_free_stats_mem(bp, stats);
3753         return -ENOMEM;
3754 }
3755
3756 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3757 {
3758         int i;
3759
3760         for (i = 0; i < count; i++)
3761                 mask_arr[i] = mask;
3762 }
3763
3764 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3765 {
3766         int i;
3767
3768         for (i = 0; i < count; i++)
3769                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3770 }
3771
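/* Query the firmware for the per-function counter masks: HWRM_FUNC_QSTATS_EXT
 * with the COUNTER_MASK flag returns each counter's width (mask) instead of
 * its value.  Only supported on P5 chips with the extended HW stats
 * capability; callers fall back to fixed masks otherwise.
 */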
3772 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3773                                     struct bnxt_stats_mem *stats)
3774 {
3775         struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3776         struct hwrm_func_qstats_ext_input req = {0};
3777         __le64 *hw_masks;
3778         int rc;
3779
3780         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3781             !(bp->flags & BNXT_FLAG_CHIP_P5))
3782                 return -EOPNOTSUPP;
3783
3784         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3785         req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3786         mutex_lock(&bp->hwrm_cmd_lock);
3787         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3788         if (rc)
3789                 goto qstat_exit;
3790
3791         hw_masks = &resp->rx_ucast_pkts;
3792         bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3793
3794 qstat_exit:
3795         mutex_unlock(&bp->hwrm_cmd_lock);
3796         return rc;
3797 }
3798
3799 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3800 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3801
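/* Establish the rollover mask for every counter so that accumulation into
 * sw_stats can detect wraps.  Masks are read from the firmware where the
 * counter-mask queries are supported; otherwise fixed widths are assumed:
 * 48 bits for P5 ring counters, 40 bits for port counters, and full 64 bits
 * elsewhere.
 */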
3802 static void bnxt_init_stats(struct bnxt *bp)
3803 {
3804         struct bnxt_napi *bnapi = bp->bnapi[0];
3805         struct bnxt_cp_ring_info *cpr;
3806         struct bnxt_stats_mem *stats;
3807         __le64 *rx_stats, *tx_stats;
3808         int rc, rx_count, tx_count;
3809         u64 *rx_masks, *tx_masks;
3810         u64 mask;
3811         u8 flags;
3812
3813         cpr = &bnapi->cp_ring;
3814         stats = &cpr->stats;
3815         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3816         if (rc) {
3817                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3818                         mask = (1ULL << 48) - 1;
3819                 else
3820                         mask = -1ULL;
3821                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3822         }
3823         if (bp->flags & BNXT_FLAG_PORT_STATS) {
3824                 stats = &bp->port_stats;
3825                 rx_stats = stats->hw_stats;
3826                 rx_masks = stats->hw_masks;
3827                 rx_count = sizeof(struct rx_port_stats) / 8;
3828                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3829                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3830                 tx_count = sizeof(struct tx_port_stats) / 8;
3831
3832                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3833                 rc = bnxt_hwrm_port_qstats(bp, flags);
3834                 if (rc) {
3835                         mask = (1ULL << 40) - 1;
3836
3837                         bnxt_fill_masks(rx_masks, mask, rx_count);
3838                         bnxt_fill_masks(tx_masks, mask, tx_count);
3839                 } else {
3840                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3841                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3842                         bnxt_hwrm_port_qstats(bp, 0);
3843                 }
3844         }
3845         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3846                 stats = &bp->rx_port_stats_ext;
3847                 rx_stats = stats->hw_stats;
3848                 rx_masks = stats->hw_masks;
3849                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
3850                 stats = &bp->tx_port_stats_ext;
3851                 tx_stats = stats->hw_stats;
3852                 tx_masks = stats->hw_masks;
3853                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
3854
3855                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3856                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
3857                 if (rc) {
3858                         mask = (1ULL << 40) - 1;
3859
3860                         bnxt_fill_masks(rx_masks, mask, rx_count);
3861                         if (tx_stats)
3862                                 bnxt_fill_masks(tx_masks, mask, tx_count);
3863                 } else {
3864                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3865                         if (tx_stats)
3866                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
3867                                                    tx_count);
3868                         bnxt_hwrm_port_qstats_ext(bp, 0);
3869                 }
3870         }
3871 }
3872
3873 static void bnxt_free_port_stats(struct bnxt *bp)
3874 {
3875         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3876         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3877
3878         bnxt_free_stats_mem(bp, &bp->port_stats);
3879         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
3880         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
3881 }
3882
3883 static void bnxt_free_ring_stats(struct bnxt *bp)
3884 {
3885         int i;
3886
3887         if (!bp->bnapi)
3888                 return;
3889
3890         for (i = 0; i < bp->cp_nr_rings; i++) {
3891                 struct bnxt_napi *bnapi = bp->bnapi[i];
3892                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3893
3894                 bnxt_free_stats_mem(bp, &cpr->stats);
3895         }
3896 }
3897
3898 static int bnxt_alloc_stats(struct bnxt *bp)
3899 {
3900         u32 size, i;
3901         int rc;
3902
3903         size = bp->hw_ring_stats_size;
3904
3905         for (i = 0; i < bp->cp_nr_rings; i++) {
3906                 struct bnxt_napi *bnapi = bp->bnapi[i];
3907                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3908
3909                 cpr->stats.len = size;
3910                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
3911                 if (rc)
3912                         return rc;
3913
3914                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3915         }
3916
3917         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3918                 return 0;
3919
3920         if (bp->port_stats.hw_stats)
3921                 goto alloc_ext_stats;
3922
3923         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
3924         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
3925         if (rc)
3926                 return rc;
3927
3928         bp->flags |= BNXT_FLAG_PORT_STATS;
3929
3930 alloc_ext_stats:
3931         /* Display extended statistics only if FW supports them */
3932         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3933                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3934                         return 0;
3935
3936         if (bp->rx_port_stats_ext.hw_stats)
3937                 goto alloc_tx_ext_stats;
3938
3939         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
3940         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
3941         /* Extended stats are optional */
3942         if (rc)
3943                 return 0;
3944
3945 alloc_tx_ext_stats:
3946         if (bp->tx_port_stats_ext.hw_stats)
3947                 return 0;
3948
3949         if (bp->hwrm_spec_code >= 0x10902 ||
3950             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3951                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
3952                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
3953                 /* Extended stats are optional */
3954                 if (rc)
3955                         return 0;
3956         }
3957         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3958         return 0;
3959 }
3960
3961 static void bnxt_clear_ring_indices(struct bnxt *bp)
3962 {
3963         int i;
3964
3965         if (!bp->bnapi)
3966                 return;
3967
3968         for (i = 0; i < bp->cp_nr_rings; i++) {
3969                 struct bnxt_napi *bnapi = bp->bnapi[i];
3970                 struct bnxt_cp_ring_info *cpr;
3971                 struct bnxt_rx_ring_info *rxr;
3972                 struct bnxt_tx_ring_info *txr;
3973
3974                 if (!bnapi)
3975                         continue;
3976
3977                 cpr = &bnapi->cp_ring;
3978                 cpr->cp_raw_cons = 0;
3979
3980                 txr = bnapi->tx_ring;
3981                 if (txr) {
3982                         txr->tx_prod = 0;
3983                         txr->tx_cons = 0;
3984                 }
3985
3986                 rxr = bnapi->rx_ring;
3987                 if (rxr) {
3988                         rxr->rx_prod = 0;
3989                         rxr->rx_agg_prod = 0;
3990                         rxr->rx_sw_agg_prod = 0;
3991                         rxr->rx_next_cons = 0;
3992                 }
3993         }
3994 }
3995
3996 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3997 {
3998 #ifdef CONFIG_RFS_ACCEL
3999         int i;
4000
4001         /* We are under rtnl_lock and all our NAPIs have been disabled.
4002          * It's safe to delete the hash table.
4003          */
4004         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4005                 struct hlist_head *head;
4006                 struct hlist_node *tmp;
4007                 struct bnxt_ntuple_filter *fltr;
4008
4009                 head = &bp->ntp_fltr_hash_tbl[i];
4010                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4011                         hlist_del(&fltr->hash);
4012                         kfree(fltr);
4013                 }
4014         }
4015         if (irq_reinit) {
4016                 kfree(bp->ntp_fltr_bmap);
4017                 bp->ntp_fltr_bmap = NULL;
4018         }
4019         bp->ntp_fltr_count = 0;
4020 #endif
4021 }
4022
4023 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4024 {
4025 #ifdef CONFIG_RFS_ACCEL
4026         int i, rc = 0;
4027
4028         if (!(bp->flags & BNXT_FLAG_RFS))
4029                 return 0;
4030
4031         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4032                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4033
4034         bp->ntp_fltr_count = 0;
4035         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4036                                     sizeof(long),
4037                                     GFP_KERNEL);
4038
4039         if (!bp->ntp_fltr_bmap)
4040                 rc = -ENOMEM;
4041
4042         return rc;
4043 #else
4044         return 0;
4045 #endif
4046 }
4047
4048 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4049 {
4050         bnxt_free_vnic_attributes(bp);
4051         bnxt_free_tx_rings(bp);
4052         bnxt_free_rx_rings(bp);
4053         bnxt_free_cp_rings(bp);
4054         bnxt_free_ntp_fltrs(bp, irq_re_init);
4055         if (irq_re_init) {
4056                 bnxt_free_ring_stats(bp);
4057                 if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
4058                         bnxt_free_port_stats(bp);
4059                 bnxt_free_ring_grps(bp);
4060                 bnxt_free_vnics(bp);
4061                 kfree(bp->tx_ring_map);
4062                 bp->tx_ring_map = NULL;
4063                 kfree(bp->tx_ring);
4064                 bp->tx_ring = NULL;
4065                 kfree(bp->rx_ring);
4066                 bp->rx_ring = NULL;
4067                 kfree(bp->bnapi);
4068                 bp->bnapi = NULL;
4069         } else {
4070                 bnxt_clear_ring_indices(bp);
4071         }
4072 }
4073
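/* Allocate all per-device ring state.  On an IRQ re-init, one kzalloc holds
 * the bnapi pointer array immediately followed by the bnxt_napi structures
 * themselves, then the RX and TX ring arrays are allocated and wired to their
 * NAPIs: with shared rings TX ring i pairs with NAPI i, otherwise TX NAPIs
 * start after the RX ones.  The first tx_nr_rings_xdp TX rings service XDP_TX
 * (bnxt_tx_int_xdp); the rest map to netdev TX queues via txq_index.  Ring
 * descriptor memory, completion rings, vnics and statistics blocks are then
 * allocated, and everything is torn down with bnxt_free_mem() on error.
 */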
4074 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4075 {
4076         int i, j, rc, size, arr_size;
4077         void *bnapi;
4078
4079         if (irq_re_init) {
4080                 /* Allocate bnapi mem pointer array and mem block for
4081                  * all queues
4082                  */
4083                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4084                                 bp->cp_nr_rings);
4085                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4086                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4087                 if (!bnapi)
4088                         return -ENOMEM;
4089
4090                 bp->bnapi = bnapi;
4091                 bnapi += arr_size;
4092                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4093                         bp->bnapi[i] = bnapi;
4094                         bp->bnapi[i]->index = i;
4095                         bp->bnapi[i]->bp = bp;
4096                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4097                                 struct bnxt_cp_ring_info *cpr =
4098                                         &bp->bnapi[i]->cp_ring;
4099
4100                                 cpr->cp_ring_struct.ring_mem.flags =
4101                                         BNXT_RMEM_RING_PTE_FLAG;
4102                         }
4103                 }
4104
4105                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4106                                       sizeof(struct bnxt_rx_ring_info),
4107                                       GFP_KERNEL);
4108                 if (!bp->rx_ring)
4109                         return -ENOMEM;
4110
4111                 for (i = 0; i < bp->rx_nr_rings; i++) {
4112                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4113
4114                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4115                                 rxr->rx_ring_struct.ring_mem.flags =
4116                                         BNXT_RMEM_RING_PTE_FLAG;
4117                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4118                                         BNXT_RMEM_RING_PTE_FLAG;
4119                         }
4120                         rxr->bnapi = bp->bnapi[i];
4121                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4122                 }
4123
4124                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4125                                       sizeof(struct bnxt_tx_ring_info),
4126                                       GFP_KERNEL);
4127                 if (!bp->tx_ring)
4128                         return -ENOMEM;
4129
4130                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4131                                           GFP_KERNEL);
4132
4133                 if (!bp->tx_ring_map)
4134                         return -ENOMEM;
4135
4136                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4137                         j = 0;
4138                 else
4139                         j = bp->rx_nr_rings;
4140
4141                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4142                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4143
4144                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4145                                 txr->tx_ring_struct.ring_mem.flags =
4146                                         BNXT_RMEM_RING_PTE_FLAG;
4147                         txr->bnapi = bp->bnapi[j];
4148                         bp->bnapi[j]->tx_ring = txr;
4149                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4150                         if (i >= bp->tx_nr_rings_xdp) {
4151                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4152                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4153                         } else {
4154                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4155                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4156                         }
4157                 }
4158
4159                 rc = bnxt_alloc_stats(bp);
4160                 if (rc)
4161                         goto alloc_mem_err;
4162                 bnxt_init_stats(bp);
4163
4164                 rc = bnxt_alloc_ntp_fltrs(bp);
4165                 if (rc)
4166                         goto alloc_mem_err;
4167
4168                 rc = bnxt_alloc_vnics(bp);
4169                 if (rc)
4170                         goto alloc_mem_err;
4171         }
4172
4173         bnxt_init_ring_struct(bp);
4174
4175         rc = bnxt_alloc_rx_rings(bp);
4176         if (rc)
4177                 goto alloc_mem_err;
4178
4179         rc = bnxt_alloc_tx_rings(bp);
4180         if (rc)
4181                 goto alloc_mem_err;
4182
4183         rc = bnxt_alloc_cp_rings(bp);
4184         if (rc)
4185                 goto alloc_mem_err;
4186
4187         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4188                                   BNXT_VNIC_UCAST_FLAG;
4189         rc = bnxt_alloc_vnic_attributes(bp);
4190         if (rc)
4191                 goto alloc_mem_err;
4192         return 0;
4193
4194 alloc_mem_err:
4195         bnxt_free_mem(bp, true);
4196         return rc;
4197 }
4198
4199 static void bnxt_disable_int(struct bnxt *bp)
4200 {
4201         int i;
4202
4203         if (!bp->bnapi)
4204                 return;
4205
4206         for (i = 0; i < bp->cp_nr_rings; i++) {
4207                 struct bnxt_napi *bnapi = bp->bnapi[i];
4208                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4209                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4210
4211                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4212                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4213         }
4214 }
4215
4216 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4217 {
4218         struct bnxt_napi *bnapi = bp->bnapi[n];
4219         struct bnxt_cp_ring_info *cpr;
4220
4221         cpr = &bnapi->cp_ring;
4222         return cpr->cp_ring_struct.map_idx;
4223 }
4224
4225 static void bnxt_disable_int_sync(struct bnxt *bp)
4226 {
4227         int i;
4228
4229         atomic_inc(&bp->intr_sem);
4230
4231         bnxt_disable_int(bp);
4232         for (i = 0; i < bp->cp_nr_rings; i++) {
4233                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4234
4235                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4236         }
4237 }
4238
4239 static void bnxt_enable_int(struct bnxt *bp)
4240 {
4241         int i;
4242
4243         atomic_set(&bp->intr_sem, 0);
4244         for (i = 0; i < bp->cp_nr_rings; i++) {
4245                 struct bnxt_napi *bnapi = bp->bnapi[i];
4246                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4247
4248                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4249         }
4250 }
4251
4252 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4253                             u16 cmpl_ring, u16 target_id)
4254 {
4255         struct input *req = request;
4256
4257         req->req_type = cpu_to_le16(req_type);
4258         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4259         req->target_id = cpu_to_le16(target_id);
4260         if (bnxt_kong_hwrm_message(bp, req))
4261                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4262         else
4263                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4264 }
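/* Typical caller pattern (an illustrative sketch, mirroring helpers later in
 * this file such as bnxt_hwrm_func_drv_unrgtr()): the request struct is
 * zero-initialized on the stack, the header is filled in here, and the
 * message is then handed to one of the hwrm_send_message() variants:
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */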
4265
4266 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4267 {
4268         switch (hwrm_err) {
4269         case HWRM_ERR_CODE_SUCCESS:
4270                 return 0;
4271         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4272                 return -EACCES;
4273         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4274                 return -ENOSPC;
4275         case HWRM_ERR_CODE_INVALID_PARAMS:
4276         case HWRM_ERR_CODE_INVALID_FLAGS:
4277         case HWRM_ERR_CODE_INVALID_ENABLES:
4278         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4279         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4280                 return -EINVAL;
4281         case HWRM_ERR_CODE_NO_BUFFER:
4282                 return -ENOMEM;
4283         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4284         case HWRM_ERR_CODE_BUSY:
4285                 return -EAGAIN;
4286         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4287                 return -EOPNOTSUPP;
4288         default:
4289                 return -EIO;
4290         }
4291 }
4292
4293 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4294                                  int timeout, bool silent)
4295 {
4296         int i, intr_process, rc, tmo_count;
4297         struct input *req = msg;
4298         u32 *data = msg;
4299         u8 *valid;
4300         u16 cp_ring_id, len = 0;
4301         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4302         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4303         struct hwrm_short_input short_input = {0};
4304         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4305         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4306         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4307
4308         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4309                 return -EBUSY;
4310
4311         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4312                 if (msg_len > bp->hwrm_max_ext_req_len ||
4313                     !bp->hwrm_short_cmd_req_addr)
4314                         return -EINVAL;
4315         }
4316
4317         if (bnxt_hwrm_kong_chnl(bp, req)) {
4318                 dst = BNXT_HWRM_CHNL_KONG;
4319                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4320                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4321                 resp = bp->hwrm_cmd_kong_resp_addr;
4322         }
4323
4324         memset(resp, 0, PAGE_SIZE);
4325         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4326         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4327
4328         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4329         /* currently supports only one outstanding message */
4330         if (intr_process)
4331                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4332
4333         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4334             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4335                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4336                 u16 max_msg_len;
4337
4338                 /* Set the boundary for the maximum extended request length
4339                  * for the short cmd format: either the value passed up by
4340                  * the device or the max supported internal req length.
4341                  */
4342                 max_msg_len = bp->hwrm_max_ext_req_len;
4343
4344                 memcpy(short_cmd_req, req, msg_len);
4345                 if (msg_len < max_msg_len)
4346                         memset(short_cmd_req + msg_len, 0,
4347                                max_msg_len - msg_len);
4348
4349                 short_input.req_type = req->req_type;
4350                 short_input.signature =
4351                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4352                 short_input.size = cpu_to_le16(msg_len);
4353                 short_input.req_addr =
4354                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4355
4356                 data = (u32 *)&short_input;
4357                 msg_len = sizeof(short_input);
4358
4359                 /* Sync memory write before updating doorbell */
4360                 wmb();
4361
4362                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4363         }
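	/* When the short command format is used above, the full request has
	 * been copied into the hwrm_short_cmd_req DMA buffer and only the
	 * small struct hwrm_short_input (req_type, signature, size and the
	 * DMA address of the full request) is written to the BAR below; the
	 * firmware then fetches the full request via DMA.
	 */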
4364
4365         /* Write request msg to hwrm channel */
4366         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4367
4368         for (i = msg_len; i < max_req_len; i += 4)
4369                 writel(0, bp->bar0 + bar_offset + i);
4370
4371         /* Ring channel doorbell */
4372         writel(1, bp->bar0 + doorbell_offset);
4373
4374         if (!pci_is_enabled(bp->pdev))
4375                 return 0;
4376
4377         if (!timeout)
4378                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4379         /* convert timeout to usec */
4380         timeout *= 1000;
4381
4382         i = 0;
4383         /* Short timeout for the first few iterations:
4384          * number of loops = number of loops for short timeout +
4385          * number of loops for standard timeout.
4386          */
4387         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4388         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4389         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
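	/* Illustrative worked example, assuming the usual bnxt.h defaults
	 * (DFLT_HWRM_CMD_TIMEOUT = 500 ms, HWRM_SHORT_TIMEOUT_COUNTER = 5,
	 * HWRM_SHORT_MIN_TIMEOUT = 3 us, HWRM_MIN_TIMEOUT = 25 us): timeout
	 * becomes 500000 us and tmo_count works out to
	 * 5 + DIV_ROUND_UP(500000 - 5 * 3, 25) = 20005 polling iterations.
	 */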
4390
4391         if (intr_process) {
4392                 u16 seq_id = bp->hwrm_intr_seq_id;
4393
4394                 /* Wait until hwrm response cmpl interrupt is processed */
4395                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4396                        i++ < tmo_count) {
4397                         /* Abort the wait for completion if the FW health
4398                          * check has failed.
4399                          */
4400                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4401                                 return -EBUSY;
4402                         /* on first few passes, just barely sleep */
4403                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4404                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4405                                              HWRM_SHORT_MAX_TIMEOUT);
4406                         else
4407                                 usleep_range(HWRM_MIN_TIMEOUT,
4408                                              HWRM_MAX_TIMEOUT);
4409                 }
4410
4411                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4412                         if (!silent)
4413                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4414                                            le16_to_cpu(req->req_type));
4415                         return -EBUSY;
4416                 }
4417                 len = le16_to_cpu(resp->resp_len);
4418                 valid = ((u8 *)resp) + len - 1;
4419         } else {
4420                 int j;
4421
4422                 /* Check if response len is updated */
4423                 for (i = 0; i < tmo_count; i++) {
4424                         /* Abort the wait for completion if the FW health
4425                          * check has failed.
4426                          */
4427                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4428                                 return -EBUSY;
4429                         len = le16_to_cpu(resp->resp_len);
4430                         if (len)
4431                                 break;
4432                         /* on first few passes, just barely sleep */
4433                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4434                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4435                                              HWRM_SHORT_MAX_TIMEOUT);
4436                         else
4437                                 usleep_range(HWRM_MIN_TIMEOUT,
4438                                              HWRM_MAX_TIMEOUT);
4439                 }
4440
4441                 if (i >= tmo_count) {
4442                         if (!silent)
4443                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4444                                            HWRM_TOTAL_TIMEOUT(i),
4445                                            le16_to_cpu(req->req_type),
4446                                            le16_to_cpu(req->seq_id), len);
4447                         return -EBUSY;
4448                 }
4449
4450                 /* Last byte of resp contains valid bit */
4451                 valid = ((u8 *)resp) + len - 1;
4452                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4453                         /* make sure we read from updated DMA memory */
4454                         dma_rmb();
4455                         if (*valid)
4456                                 break;
4457                         usleep_range(1, 5);
4458                 }
4459
4460                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4461                         if (!silent)
4462                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4463                                            HWRM_TOTAL_TIMEOUT(i),
4464                                            le16_to_cpu(req->req_type),
4465                                            le16_to_cpu(req->seq_id), len,
4466                                            *valid);
4467                         return -EBUSY;
4468                 }
4469         }
4470
4471         /* Zero the valid bit for compatibility.  The valid bit in an older
4472          * spec may become a new field in a newer spec.  We must make sure
4473          * that a new field not implemented by the old spec will read zero.
4474          */
4475         *valid = 0;
4476         rc = le16_to_cpu(resp->error_code);
4477         if (rc && !silent)
4478                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4479                            le16_to_cpu(resp->req_type),
4480                            le16_to_cpu(resp->seq_id), rc);
4481         return bnxt_hwrm_to_stderr(rc);
4482 }
4483
4484 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4485 {
4486         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4487 }
4488
4489 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4490                               int timeout)
4491 {
4492         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4493 }
4494
4495 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4496 {
4497         int rc;
4498
4499         mutex_lock(&bp->hwrm_cmd_lock);
4500         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4501         mutex_unlock(&bp->hwrm_cmd_lock);
4502         return rc;
4503 }
4504
4505 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4506                              int timeout)
4507 {
4508         int rc;
4509
4510         mutex_lock(&bp->hwrm_cmd_lock);
4511         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4512         mutex_unlock(&bp->hwrm_cmd_lock);
4513         return rc;
4514 }
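/* The _hwrm_send_message() variants assume the caller already holds
 * bp->hwrm_cmd_lock, which is what makes it safe to read the response buffer
 * after the call returns.  A minimal sketch of that pattern, as used by
 * several helpers below (resp->some_field is a hypothetical response field):
 *
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		val = le16_to_cpu(resp->some_field);
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 *
 * The hwrm_send_message() wrappers take and drop the lock internally and are
 * intended for requests whose response body is not examined afterwards.
 */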
4515
4516 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4517                             bool async_only)
4518 {
4519         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4520         struct hwrm_func_drv_rgtr_input req = {0};
4521         DECLARE_BITMAP(async_events_bmap, 256);
4522         u32 *events = (u32 *)async_events_bmap;
4523         u32 flags;
4524         int rc, i;
4525
4526         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4527
4528         req.enables =
4529                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4530                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4531                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4532
4533         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4534         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4535         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4536                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4537         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4538                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4539                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4540         req.flags = cpu_to_le32(flags);
4541         req.ver_maj_8b = DRV_VER_MAJ;
4542         req.ver_min_8b = DRV_VER_MIN;
4543         req.ver_upd_8b = DRV_VER_UPD;
4544         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4545         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4546         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4547
4548         if (BNXT_PF(bp)) {
4549                 u32 data[8];
4550                 int i;
4551
4552                 memset(data, 0, sizeof(data));
4553                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4554                         u16 cmd = bnxt_vf_req_snif[i];
4555                         unsigned int bit, idx;
4556
4557                         idx = cmd / 32;
4558                         bit = cmd % 32;
4559                         data[idx] |= 1 << bit;
4560                 }
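		/* data[] is now a 256-bit bitmap (8 x u32) of VF request types
		 * that should be forwarded to the PF; e.g. command id 40 sets
		 * bit 8 of data[1] (40 / 32 == 1, 40 % 32 == 8).  It is copied
		 * into req.vf_req_fwd below in little-endian form.
		 */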
4561
4562                 for (i = 0; i < 8; i++)
4563                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4564
4565                 req.enables |=
4566                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4567         }
4568
4569         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4570                 req.flags |= cpu_to_le32(
4571                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4572
4573         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4574         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4575                 u16 event_id = bnxt_async_events_arr[i];
4576
4577                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4578                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4579                         continue;
4580                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4581         }
4582         if (bmap && bmap_size) {
4583                 for (i = 0; i < bmap_size; i++) {
4584                         if (test_bit(i, bmap))
4585                                 __set_bit(i, async_events_bmap);
4586                 }
4587         }
4588         for (i = 0; i < 8; i++)
4589                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4590
4591         if (async_only)
4592                 req.enables =
4593                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4594
4595         mutex_lock(&bp->hwrm_cmd_lock);
4596         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4597         if (!rc) {
4598                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4599                 if (resp->flags &
4600                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4601                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4602         }
4603         mutex_unlock(&bp->hwrm_cmd_lock);
4604         return rc;
4605 }
4606
4607 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4608 {
4609         struct hwrm_func_drv_unrgtr_input req = {0};
4610
4611         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4612                 return 0;
4613
4614         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4615         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4616 }
4617
4618 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4619 {
4620         int rc = 0;
4621         struct hwrm_tunnel_dst_port_free_input req = {0};
4622
4623         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4624         req.tunnel_type = tunnel_type;
4625
4626         switch (tunnel_type) {
4627         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4628                 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4629                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4630                 break;
4631         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4632                 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4633                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4634                 break;
4635         default:
4636                 break;
4637         }
4638
4639         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4640         if (rc)
4641                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4642                            rc);
4643         return rc;
4644 }
4645
4646 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4647                                            u8 tunnel_type)
4648 {
4649         int rc = 0;
4650         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4651         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4652
4653         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4654
4655         req.tunnel_type = tunnel_type;
4656         req.tunnel_dst_port_val = port;
4657
4658         mutex_lock(&bp->hwrm_cmd_lock);
4659         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4660         if (rc) {
4661                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4662                            rc);
4663                 goto err_out;
4664         }
4665
4666         switch (tunnel_type) {
4667         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4668                 bp->vxlan_fw_dst_port_id =
4669                         le16_to_cpu(resp->tunnel_dst_port_id);
4670                 break;
4671         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4672                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4673                 break;
4674         default:
4675                 break;
4676         }
4677
4678 err_out:
4679         mutex_unlock(&bp->hwrm_cmd_lock);
4680         return rc;
4681 }
4682
4683 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4684 {
4685         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4686         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4687
4688         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4689         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4690
4691         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4692         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4693         req.mask = cpu_to_le32(vnic->rx_mask);
4694         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4695 }
4696
4697 #ifdef CONFIG_RFS_ACCEL
4698 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4699                                             struct bnxt_ntuple_filter *fltr)
4700 {
4701         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4702
4703         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4704         req.ntuple_filter_id = fltr->filter_id;
4705         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4706 }
4707
4708 #define BNXT_NTP_FLTR_FLAGS                                     \
4709         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4710          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4711          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4712          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4713          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4714          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4715          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4716          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4717          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4718          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4719          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4720          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4721          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4722          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4723
4724 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4725                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4726
4727 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4728                                              struct bnxt_ntuple_filter *fltr)
4729 {
4730         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4731         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4732         struct flow_keys *keys = &fltr->fkeys;
4733         struct bnxt_vnic_info *vnic;
4734         u32 flags = 0;
4735         int rc = 0;
4736
4737         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4738         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4739
4740         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4741                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4742                 req.dst_id = cpu_to_le16(fltr->rxq);
4743         } else {
4744                 vnic = &bp->vnic_info[fltr->rxq + 1];
4745                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4746         }
4747         req.flags = cpu_to_le32(flags);
4748         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4749
4750         req.ethertype = htons(ETH_P_IP);
4751         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4752         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4753         req.ip_protocol = keys->basic.ip_proto;
4754
4755         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4756                 int i;
4757
4758                 req.ethertype = htons(ETH_P_IPV6);
4759                 req.ip_addr_type =
4760                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4761                 *(struct in6_addr *)&req.src_ipaddr[0] =
4762                         keys->addrs.v6addrs.src;
4763                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4764                         keys->addrs.v6addrs.dst;
4765                 for (i = 0; i < 4; i++) {
4766                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4767                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4768                 }
4769         } else {
4770                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4771                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4772                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4773                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4774         }
4775         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4776                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4777                 req.tunnel_type =
4778                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4779         }
4780
4781         req.src_port = keys->ports.src;
4782         req.src_port_mask = cpu_to_be16(0xffff);
4783         req.dst_port = keys->ports.dst;
4784         req.dst_port_mask = cpu_to_be16(0xffff);
4785
4786         mutex_lock(&bp->hwrm_cmd_lock);
4787         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4788         if (!rc) {
4789                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4790                 fltr->filter_id = resp->ntuple_filter_id;
4791         }
4792         mutex_unlock(&bp->hwrm_cmd_lock);
4793         return rc;
4794 }
4795 #endif
4796
4797 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4798                                      u8 *mac_addr)
4799 {
4800         int rc = 0;
4801         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4802         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4803
4804         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4805         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4806         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4807                 req.flags |=
4808                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4809         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4810         req.enables =
4811                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4812                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4813                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4814         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
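	/* All-ones mask: match all 6 bytes of the L2 address exactly */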
4815         req.l2_addr_mask[0] = 0xff;
4816         req.l2_addr_mask[1] = 0xff;
4817         req.l2_addr_mask[2] = 0xff;
4818         req.l2_addr_mask[3] = 0xff;
4819         req.l2_addr_mask[4] = 0xff;
4820         req.l2_addr_mask[5] = 0xff;
4821
4822         mutex_lock(&bp->hwrm_cmd_lock);
4823         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4824         if (!rc)
4825                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4826                                                         resp->l2_filter_id;
4827         mutex_unlock(&bp->hwrm_cmd_lock);
4828         return rc;
4829 }
4830
4831 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4832 {
4833         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4834         int rc = 0;
4835
4836         /* Any associated ntuple filters will also be cleared by firmware. */
4837         mutex_lock(&bp->hwrm_cmd_lock);
4838         for (i = 0; i < num_of_vnics; i++) {
4839                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4840
4841                 for (j = 0; j < vnic->uc_filter_count; j++) {
4842                         struct hwrm_cfa_l2_filter_free_input req = {0};
4843
4844                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4845                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4846
4847                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4848
4849                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4850                                                 HWRM_CMD_TIMEOUT);
4851                 }
4852                 vnic->uc_filter_count = 0;
4853         }
4854         mutex_unlock(&bp->hwrm_cmd_lock);
4855
4856         return rc;
4857 }
4858
4859 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4860 {
4861         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4862         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4863         struct hwrm_vnic_tpa_cfg_input req = {0};
4864
4865         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4866                 return 0;
4867
4868         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4869
4870         if (tpa_flags) {
4871                 u16 mss = bp->dev->mtu - 40;
4872                 u32 nsegs, n, segs = 0, flags;
4873
4874                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4875                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4876                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4877                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4878                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4879                 if (tpa_flags & BNXT_FLAG_GRO)
4880                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4881
4882                 req.flags = cpu_to_le32(flags);
4883
4884                 req.enables =
4885                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4886                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4887                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4888
4889                 /* The number of segs is in log2 units, and the first packet
4890                  * is not included as part of these units.
4891                  */
4892                 if (mss <= BNXT_RX_PAGE_SIZE) {
4893                         n = BNXT_RX_PAGE_SIZE / mss;
4894                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4895                 } else {
4896                         n = mss / BNXT_RX_PAGE_SIZE;
4897                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4898                                 n++;
4899                         nsegs = (MAX_SKB_FRAGS - n) / n;
4900                 }
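		/* Illustrative example (assuming 4 KB pages so that
		 * BNXT_RX_PAGE_SIZE == 4096, MAX_SKB_FRAGS == 17 and a 1500
		 * byte MTU): mss = 1460 fits in one page, so n = 4096 / 1460
		 * = 2 and nsegs = (17 - 1) * 2 = 32; the non-P5 path below
		 * then programs max_agg_segs = ilog2(32) = 5.
		 */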
4901
4902                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4903                         segs = MAX_TPA_SEGS_P5;
4904                         max_aggs = bp->max_tpa;
4905                 } else {
4906                         segs = ilog2(nsegs);
4907                 }
4908                 req.max_agg_segs = cpu_to_le16(segs);
4909                 req.max_aggs = cpu_to_le16(max_aggs);
4910
4911                 req.min_agg_len = cpu_to_le32(512);
4912         }
4913         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4914
4915         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4916 }
4917
4918 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4919 {
4920         struct bnxt_ring_grp_info *grp_info;
4921
4922         grp_info = &bp->grp_info[ring->grp_idx];
4923         return grp_info->cp_fw_ring_id;
4924 }
4925
4926 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4927 {
4928         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4929                 struct bnxt_napi *bnapi = rxr->bnapi;
4930                 struct bnxt_cp_ring_info *cpr;
4931
4932                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4933                 return cpr->cp_ring_struct.fw_ring_id;
4934         } else {
4935                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4936         }
4937 }
4938
4939 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4940 {
4941         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4942                 struct bnxt_napi *bnapi = txr->bnapi;
4943                 struct bnxt_cp_ring_info *cpr;
4944
4945                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4946                 return cpr->cp_ring_struct.fw_ring_id;
4947         } else {
4948                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4949         }
4950 }
4951
4952 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
4953 {
4954         int entries;
4955
4956         if (bp->flags & BNXT_FLAG_CHIP_P5)
4957                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
4958         else
4959                 entries = HW_HASH_INDEX_SIZE;
4960
4961         bp->rss_indir_tbl_entries = entries;
4962         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
4963                                           GFP_KERNEL);
4964         if (!bp->rss_indir_tbl)
4965                 return -ENOMEM;
4966         return 0;
4967 }
4968
4969 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
4970 {
4971         u16 max_rings, max_entries, pad, i;
4972
4973         if (!bp->rx_nr_rings)
4974                 return;
4975
4976         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4977                 max_rings = bp->rx_nr_rings - 1;
4978         else
4979                 max_rings = bp->rx_nr_rings;
4980
4981         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
4982
4983         for (i = 0; i < max_entries; i++)
4984                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
4985
4986         pad = bp->rss_indir_tbl_entries - max_entries;
4987         if (pad)
4988                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
4989 }
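/* Illustrative example of the default spread above: with 4 RX rings,
 * ethtool_rxfh_indir_default(i, 4) is simply i % 4, so the table begins
 * 0, 1, 2, 3, 0, 1, 2, 3, ... and any padding entries at the end of
 * rss_indir_tbl are zeroed.
 */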
4990
4991 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
4992 {
4993         u16 i, tbl_size, max_ring = 0;
4994
4995         if (!bp->rss_indir_tbl)
4996                 return 0;
4997
4998         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
4999         for (i = 0; i < tbl_size; i++)
5000                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5001         return max_ring;
5002 }
5003
5004 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5005 {
5006         if (bp->flags & BNXT_FLAG_CHIP_P5)
5007                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5008         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5009                 return 2;
5010         return 1;
5011 }
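/* For example, on P5 chips with BNXT_RSS_TABLE_ENTRIES_P5 == 64 (its value
 * in bnxt.h at the time of writing), 1-64 RX rings need one RSS context and
 * 65-128 need two; older chips always use a single context (two on Nitro A0).
 */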
5012
5013 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5014 {
5015         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5016         u16 i, j;
5017
5018         /* Fill the RSS indirection table with ring group ids */
5019         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5020                 if (!no_rss)
5021                         j = bp->rss_indir_tbl[i];
5022                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5023         }
5024 }
5025
5026 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5027                                       struct bnxt_vnic_info *vnic)
5028 {
5029         __le16 *ring_tbl = vnic->rss_table;
5030         struct bnxt_rx_ring_info *rxr;
5031         u16 tbl_size, i;
5032
5033         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5034
5035         for (i = 0; i < tbl_size; i++) {
5036                 u16 ring_id, j;
5037
5038                 j = bp->rss_indir_tbl[i];
5039                 rxr = &bp->rx_ring[j];
5040
5041                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5042                 *ring_tbl++ = cpu_to_le16(ring_id);
5043                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5044                 *ring_tbl++ = cpu_to_le16(ring_id);
5045         }
5046 }
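/* On P5, each indirection table slot expands to a pair of __le16 entries:
 * the RX ring's fw_ring_id followed by the fw id of its completion ring, so
 * the table written to the device is twice the logical indirection size.
 */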
5047
5048 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5049 {
5050         if (bp->flags & BNXT_FLAG_CHIP_P5)
5051                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5052         else
5053                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5054 }
5055
5056 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5057 {
5058         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5059         struct hwrm_vnic_rss_cfg_input req = {0};
5060
5061         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5062             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5063                 return 0;
5064
5065         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5066         if (set_rss) {
5067                 bnxt_fill_hw_rss_tbl(bp, vnic);
5068                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5069                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5070                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5071                 req.hash_key_tbl_addr =
5072                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5073         }
5074         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5075         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5076 }
5077
5078 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5079 {
5080         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5081         struct hwrm_vnic_rss_cfg_input req = {0};
5082         dma_addr_t ring_tbl_map;
5083         u32 i, nr_ctxs;
5084
5085         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5086         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5087         if (!set_rss) {
5088                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5089                 return 0;
5090         }
5091         bnxt_fill_hw_rss_tbl(bp, vnic);
5092         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5093         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5094         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5095         ring_tbl_map = vnic->rss_table_dma_addr;
5096         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5097         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5098                 int rc;
5099
5100                 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5101                 req.ring_table_pair_index = i;
5102                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5103                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5104                 if (rc)
5105                         return rc;
5106         }
5107         return 0;
5108 }
5109
5110 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5111 {
5112         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5113         struct hwrm_vnic_plcmodes_cfg_input req = {0};
5114
5115         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5116         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5117                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5118                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5119         req.enables =
5120                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5121                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5122         /* thresholds not implemented in firmware yet */
5123         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5124         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5125         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5126         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5127 }
5128
5129 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5130                                         u16 ctx_idx)
5131 {
5132         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5133
5134         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5135         req.rss_cos_lb_ctx_id =
5136                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5137
5138         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5139         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5140 }
5141
5142 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5143 {
5144         int i, j;
5145
5146         for (i = 0; i < bp->nr_vnics; i++) {
5147                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5148
5149                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5150                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5151                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5152                 }
5153         }
5154         bp->rsscos_nr_ctxs = 0;
5155 }
5156
5157 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5158 {
5159         int rc;
5160         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5161         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5162                                                 bp->hwrm_cmd_resp_addr;
5163
5164         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5165                                -1);
5166
5167         mutex_lock(&bp->hwrm_cmd_lock);
5168         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5169         if (!rc)
5170                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5171                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5172         mutex_unlock(&bp->hwrm_cmd_lock);
5173
5174         return rc;
5175 }
5176
5177 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5178 {
5179         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5180                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5181         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5182 }
5183
5184 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5185 {
5186         unsigned int ring = 0, grp_idx;
5187         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5188         struct hwrm_vnic_cfg_input req = {0};
5189         u16 def_vlan = 0;
5190
5191         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5192
5193         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5194                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5195
5196                 req.default_rx_ring_id =
5197                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5198                 req.default_cmpl_ring_id =
5199                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5200                 req.enables =
5201                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5202                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5203                 goto vnic_mru;
5204         }
5205         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5206         /* Only RSS is supported for now; TBD: COS & LB */
5207         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5208                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5209                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5210                                            VNIC_CFG_REQ_ENABLES_MRU);
5211         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5212                 req.rss_rule =
5213                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5214                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5215                                            VNIC_CFG_REQ_ENABLES_MRU);
5216                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5217         } else {
5218                 req.rss_rule = cpu_to_le16(0xffff);
5219         }
5220
5221         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5222             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5223                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5224                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5225         } else {
5226                 req.cos_rule = cpu_to_le16(0xffff);
5227         }
5228
5229         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5230                 ring = 0;
5231         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5232                 ring = vnic_id - 1;
5233         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5234                 ring = bp->rx_nr_rings - 1;
5235
5236         grp_idx = bp->rx_ring[ring].bnapi->index;
5237         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5238         req.lb_rule = cpu_to_le16(0xffff);
5239 vnic_mru:
5240         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5241
5242         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5243 #ifdef CONFIG_BNXT_SRIOV
5244         if (BNXT_VF(bp))
5245                 def_vlan = bp->vf.vlan;
5246 #endif
5247         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5248                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5249         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5250                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5251
5252         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5253 }
5254
5255 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5256 {
5257         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5258                 struct hwrm_vnic_free_input req = {0};
5259
5260                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5261                 req.vnic_id =
5262                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5263
5264                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5265                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5266         }
5267 }
5268
5269 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5270 {
5271         u16 i;
5272
5273         for (i = 0; i < bp->nr_vnics; i++)
5274                 bnxt_hwrm_vnic_free_one(bp, i);
5275 }
5276
5277 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5278                                 unsigned int start_rx_ring_idx,
5279                                 unsigned int nr_rings)
5280 {
5281         int rc = 0;
5282         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5283         struct hwrm_vnic_alloc_input req = {0};
5284         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5285         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5286
5287         if (bp->flags & BNXT_FLAG_CHIP_P5)
5288                 goto vnic_no_ring_grps;
5289
5290         /* map ring groups to this vnic */
5291         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5292                 grp_idx = bp->rx_ring[i].bnapi->index;
5293                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5294                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5295                                    j, nr_rings);
5296                         break;
5297                 }
5298                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5299         }
5300
5301 vnic_no_ring_grps:
5302         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5303                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5304         if (vnic_id == 0)
5305                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5306
5307         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5308
5309         mutex_lock(&bp->hwrm_cmd_lock);
5310         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5311         if (!rc)
5312                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5313         mutex_unlock(&bp->hwrm_cmd_lock);
5314         return rc;
5315 }
5316
5317 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5318 {
5319         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5320         struct hwrm_vnic_qcaps_input req = {0};
5321         int rc;
5322
5323         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5324         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5325         if (bp->hwrm_spec_code < 0x10600)
5326                 return 0;
5327
5328         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5329         mutex_lock(&bp->hwrm_cmd_lock);
5330         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5331         if (!rc) {
5332                 u32 flags = le32_to_cpu(resp->flags);
5333
5334                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5335                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5336                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5337                 if (flags &
5338                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5339                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5340
5341                 /* Older P5 fw before EXT_HW_STATS support did not set
5342                  * VLAN_STRIP_CAP properly.
5343                  */
5344                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5345                     ((bp->flags & BNXT_FLAG_CHIP_P5) &&
5346                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5347                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5348                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5349                 if (bp->max_tpa_v2)
5350                         bp->hw_ring_stats_size =
5351                                 sizeof(struct ctx_hw_stats_ext);
5352         }
5353         mutex_unlock(&bp->hwrm_cmd_lock);
5354         return rc;
5355 }
5356
5357 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5358 {
5359         u16 i;
5360         int rc = 0;
5361
5362         if (bp->flags & BNXT_FLAG_CHIP_P5)
5363                 return 0;
5364
5365         mutex_lock(&bp->hwrm_cmd_lock);
5366         for (i = 0; i < bp->rx_nr_rings; i++) {
5367                 struct hwrm_ring_grp_alloc_input req = {0};
5368                 struct hwrm_ring_grp_alloc_output *resp =
5369                                         bp->hwrm_cmd_resp_addr;
5370                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5371
5372                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5373
5374                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5375                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5376                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5377                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5378
5379                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5380                                         HWRM_CMD_TIMEOUT);
5381                 if (rc)
5382                         break;
5383
5384                 bp->grp_info[grp_idx].fw_grp_id =
5385                         le32_to_cpu(resp->ring_group_id);
5386         }
5387         mutex_unlock(&bp->hwrm_cmd_lock);
5388         return rc;
5389 }
5390
5391 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5392 {
5393         u16 i;
5394         struct hwrm_ring_grp_free_input req = {0};
5395
5396         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5397                 return;
5398
5399         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5400
5401         mutex_lock(&bp->hwrm_cmd_lock);
5402         for (i = 0; i < bp->cp_nr_rings; i++) {
5403                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5404                         continue;
5405                 req.ring_group_id =
5406                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5407
5408                 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5409                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5410         }
5411         mutex_unlock(&bp->hwrm_cmd_lock);
5412 }
5413
5414 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5415                                     struct bnxt_ring_struct *ring,
5416                                     u32 ring_type, u32 map_index)
5417 {
5418         int rc = 0, err = 0;
5419         struct hwrm_ring_alloc_input req = {0};
5420         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5421         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5422         struct bnxt_ring_grp_info *grp_info;
5423         u16 ring_id;
5424
5425         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5426
5427         req.enables = 0;
5428         if (rmem->nr_pages > 1) {
5429                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5430                 /* Page size is in log2 units */
5431                 req.page_size = BNXT_PAGE_SHIFT;
5432                 req.page_tbl_depth = 1;
5433         } else {
5434                 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5435         }
5436         req.fbo = 0;
5437         /* Association of ring index with doorbell index and MSIX number */
5438         req.logical_id = cpu_to_le16(map_index);
5439
5440         switch (ring_type) {
5441         case HWRM_RING_ALLOC_TX: {
5442                 struct bnxt_tx_ring_info *txr;
5443
5444                 txr = container_of(ring, struct bnxt_tx_ring_info,
5445                                    tx_ring_struct);
5446                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5447                 /* Association of transmit ring with completion ring */
5448                 grp_info = &bp->grp_info[ring->grp_idx];
5449                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5450                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5451                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5452                 req.queue_id = cpu_to_le16(ring->queue_id);
5453                 break;
5454         }
5455         case HWRM_RING_ALLOC_RX:
5456                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5457                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5458                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5459                         u16 flags = 0;
5460
5461                         /* Association of rx ring with stats context */
5462                         grp_info = &bp->grp_info[ring->grp_idx];
5463                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5464                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5465                         req.enables |= cpu_to_le32(
5466                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5467                         if (NET_IP_ALIGN == 2)
5468                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5469                         req.flags = cpu_to_le16(flags);
5470                 }
5471                 break;
5472         case HWRM_RING_ALLOC_AGG:
5473                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5474                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5475                         /* Association of agg ring with rx ring */
5476                         grp_info = &bp->grp_info[ring->grp_idx];
5477                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5478                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5479                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5480                         req.enables |= cpu_to_le32(
5481                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5482                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5483                 } else {
5484                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5485                 }
5486                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5487                 break;
5488         case HWRM_RING_ALLOC_CMPL:
5489                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5490                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5491                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5492                         /* Association of cp ring with nq */
5493                         grp_info = &bp->grp_info[map_index];
5494                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5495                         req.cq_handle = cpu_to_le64(ring->handle);
5496                         req.enables |= cpu_to_le32(
5497                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5498                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5499                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5500                 }
5501                 break;
5502         case HWRM_RING_ALLOC_NQ:
5503                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5504                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5505                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5506                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5507                 break;
5508         default:
5509                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5510                            ring_type);
5511                 return -EINVAL;
5512         }
5513
5514         mutex_lock(&bp->hwrm_cmd_lock);
5515         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5516         err = le16_to_cpu(resp->error_code);
5517         ring_id = le16_to_cpu(resp->ring_id);
5518         mutex_unlock(&bp->hwrm_cmd_lock);
5519
5520         if (rc || err) {
5521                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5522                            ring_type, rc, err);
5523                 return -EIO;
5524         }
5525         ring->fw_ring_id = ring_id;
5526         return rc;
5527 }
5528
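/* Designate the completion ring that the firmware should use for async
 * event notifications: HWRM_FUNC_CFG on the PF, HWRM_FUNC_VF_CFG on a VF.
 */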
5529 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5530 {
5531         int rc;
5532
5533         if (BNXT_PF(bp)) {
5534                 struct hwrm_func_cfg_input req = {0};
5535
5536                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5537                 req.fid = cpu_to_le16(0xffff);
5538                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5539                 req.async_event_cr = cpu_to_le16(idx);
5540                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5541         } else {
5542                 struct hwrm_func_vf_cfg_input req = {0};
5543
5544                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5545                 req.enables =
5546                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5547                 req.async_event_cr = cpu_to_le16(idx);
5548                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5549         }
5550         return rc;
5551 }
5552
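/* Set up the doorbell address and key for a newly allocated ring.  On P5
 * chips all rings share a fixed PF/VF doorbell offset in BAR 1 and use a
 * 64-bit key that encodes the ring type and firmware ring ID (xid); on
 * older chips each ring gets its own 32-bit doorbell at map_idx * 0x80.
 */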
5553 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5554                         u32 map_idx, u32 xid)
5555 {
5556         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5557                 if (BNXT_PF(bp))
5558                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5559                 else
5560                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5561                 switch (ring_type) {
5562                 case HWRM_RING_ALLOC_TX:
5563                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5564                         break;
5565                 case HWRM_RING_ALLOC_RX:
5566                 case HWRM_RING_ALLOC_AGG:
5567                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5568                         break;
5569                 case HWRM_RING_ALLOC_CMPL:
5570                         db->db_key64 = DBR_PATH_L2;
5571                         break;
5572                 case HWRM_RING_ALLOC_NQ:
5573                         db->db_key64 = DBR_PATH_L2;
5574                         break;
5575                 }
5576                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5577         } else {
5578                 db->doorbell = bp->bar1 + map_idx * 0x80;
5579                 switch (ring_type) {
5580                 case HWRM_RING_ALLOC_TX:
5581                         db->db_key32 = DB_KEY_TX;
5582                         break;
5583                 case HWRM_RING_ALLOC_RX:
5584                 case HWRM_RING_ALLOC_AGG:
5585                         db->db_key32 = DB_KEY_RX;
5586                         break;
5587                 case HWRM_RING_ALLOC_CMPL:
5588                         db->db_key32 = DB_KEY_CP;
5589                         break;
5590                 }
5591         }
5592 }
5593
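/* Allocate all firmware rings: NQs (P5) or completion rings first, then TX,
 * RX and, if enabled, RX aggregation rings.  Each NQ/completion ring's IRQ
 * is kept disabled while the ring is allocated and armed, and the first one
 * is also registered as the async event ring.  On P5 chips a dedicated L2
 * completion ring is allocated for every TX and RX ring.
 */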
5594 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5595 {
5596         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5597         int i, rc = 0;
5598         u32 type;
5599
5600         if (bp->flags & BNXT_FLAG_CHIP_P5)
5601                 type = HWRM_RING_ALLOC_NQ;
5602         else
5603                 type = HWRM_RING_ALLOC_CMPL;
5604         for (i = 0; i < bp->cp_nr_rings; i++) {
5605                 struct bnxt_napi *bnapi = bp->bnapi[i];
5606                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5607                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5608                 u32 map_idx = ring->map_idx;
5609                 unsigned int vector;
5610
5611                 vector = bp->irq_tbl[map_idx].vector;
5612                 disable_irq_nosync(vector);
5613                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5614                 if (rc) {
5615                         enable_irq(vector);
5616                         goto err_out;
5617                 }
5618                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5619                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5620                 enable_irq(vector);
5621                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5622
5623                 if (!i) {
5624                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5625                         if (rc)
5626                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5627                 }
5628         }
5629
5630         type = HWRM_RING_ALLOC_TX;
5631         for (i = 0; i < bp->tx_nr_rings; i++) {
5632                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5633                 struct bnxt_ring_struct *ring;
5634                 u32 map_idx;
5635
5636                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5637                         struct bnxt_napi *bnapi = txr->bnapi;
5638                         struct bnxt_cp_ring_info *cpr, *cpr2;
5639                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5640
5641                         cpr = &bnapi->cp_ring;
5642                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5643                         ring = &cpr2->cp_ring_struct;
5644                         ring->handle = BNXT_TX_HDL;
5645                         map_idx = bnapi->index;
5646                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5647                         if (rc)
5648                                 goto err_out;
5649                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5650                                     ring->fw_ring_id);
5651                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5652                 }
5653                 ring = &txr->tx_ring_struct;
5654                 map_idx = i;
5655                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5656                 if (rc)
5657                         goto err_out;
5658                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5659         }
5660
5661         type = HWRM_RING_ALLOC_RX;
5662         for (i = 0; i < bp->rx_nr_rings; i++) {
5663                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5664                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5665                 struct bnxt_napi *bnapi = rxr->bnapi;
5666                 u32 map_idx = bnapi->index;
5667
5668                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5669                 if (rc)
5670                         goto err_out;
5671                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5672                 /* With agg rings, the RX doorbell is written later, after posting agg buffers. */
5673                 if (!agg_rings)
5674                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5675                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5676                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5677                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5678                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5679                         struct bnxt_cp_ring_info *cpr2;
5680
5681                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5682                         ring = &cpr2->cp_ring_struct;
5683                         ring->handle = BNXT_RX_HDL;
5684                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5685                         if (rc)
5686                                 goto err_out;
5687                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5688                                     ring->fw_ring_id);
5689                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5690                 }
5691         }
5692
5693         if (agg_rings) {
5694                 type = HWRM_RING_ALLOC_AGG;
5695                 for (i = 0; i < bp->rx_nr_rings; i++) {
5696                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5697                         struct bnxt_ring_struct *ring =
5698                                                 &rxr->rx_agg_ring_struct;
5699                         u32 grp_idx = ring->grp_idx;
5700                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5701
5702                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5703                         if (rc)
5704                                 goto err_out;
5705
5706                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5707                                     ring->fw_ring_id);
5708                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5709                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5710                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5711                 }
5712         }
5713 err_out:
5714         return rc;
5715 }
5716
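/* Send HWRM_RING_FREE for one ring.  Skipped when the firmware is in a
 * fatal error state.
 */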
5717 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5718                                    struct bnxt_ring_struct *ring,
5719                                    u32 ring_type, int cmpl_ring_id)
5720 {
5721         int rc;
5722         struct hwrm_ring_free_input req = {0};
5723         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5724         u16 error_code;
5725
5726         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5727                 return 0;
5728
5729         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5730         req.ring_type = ring_type;
5731         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5732
5733         mutex_lock(&bp->hwrm_cmd_lock);
5734         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5735         error_code = le16_to_cpu(resp->error_code);
5736         mutex_unlock(&bp->hwrm_cmd_lock);
5737
5738         if (rc || error_code) {
5739                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5740                            ring_type, rc, error_code);
5741                 return -EIO;
5742         }
5743         return 0;
5744 }
5745
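/* Free all firmware rings: TX first, then RX, RX aggregation, and finally
 * the completion/NQ rings (including their P5 sub-rings).
 */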
5746 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5747 {
5748         u32 type;
5749         int i;
5750
5751         if (!bp->bnapi)
5752                 return;
5753
5754         for (i = 0; i < bp->tx_nr_rings; i++) {
5755                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5756                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5757
5758                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5759                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5760
5761                         hwrm_ring_free_send_msg(bp, ring,
5762                                                 RING_FREE_REQ_RING_TYPE_TX,
5763                                                 close_path ? cmpl_ring_id :
5764                                                 INVALID_HW_RING_ID);
5765                         ring->fw_ring_id = INVALID_HW_RING_ID;
5766                 }
5767         }
5768
5769         for (i = 0; i < bp->rx_nr_rings; i++) {
5770                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5771                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5772                 u32 grp_idx = rxr->bnapi->index;
5773
5774                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5775                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5776
5777                         hwrm_ring_free_send_msg(bp, ring,
5778                                                 RING_FREE_REQ_RING_TYPE_RX,
5779                                                 close_path ? cmpl_ring_id :
5780                                                 INVALID_HW_RING_ID);
5781                         ring->fw_ring_id = INVALID_HW_RING_ID;
5782                         bp->grp_info[grp_idx].rx_fw_ring_id =
5783                                 INVALID_HW_RING_ID;
5784                 }
5785         }
5786
5787         if (bp->flags & BNXT_FLAG_CHIP_P5)
5788                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5789         else
5790                 type = RING_FREE_REQ_RING_TYPE_RX;
5791         for (i = 0; i < bp->rx_nr_rings; i++) {
5792                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5793                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5794                 u32 grp_idx = rxr->bnapi->index;
5795
5796                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5797                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5798
5799                         hwrm_ring_free_send_msg(bp, ring, type,
5800                                                 close_path ? cmpl_ring_id :
5801                                                 INVALID_HW_RING_ID);
5802                         ring->fw_ring_id = INVALID_HW_RING_ID;
5803                         bp->grp_info[grp_idx].agg_fw_ring_id =
5804                                 INVALID_HW_RING_ID;
5805                 }
5806         }
5807
5808         /* The completion rings are about to be freed.  After that the
5809          * IRQ doorbell will not work anymore.  So we need to disable
5810          * IRQ here.
5811          * IRQs here.
5812         bnxt_disable_int_sync(bp);
5813
5814         if (bp->flags & BNXT_FLAG_CHIP_P5)
5815                 type = RING_FREE_REQ_RING_TYPE_NQ;
5816         else
5817                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5818         for (i = 0; i < bp->cp_nr_rings; i++) {
5819                 struct bnxt_napi *bnapi = bp->bnapi[i];
5820                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5821                 struct bnxt_ring_struct *ring;
5822                 int j;
5823
5824                 for (j = 0; j < 2; j++) {
5825                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5826
5827                         if (cpr2) {
5828                                 ring = &cpr2->cp_ring_struct;
5829                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5830                                         continue;
5831                                 hwrm_ring_free_send_msg(bp, ring,
5832                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5833                                         INVALID_HW_RING_ID);
5834                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5835                         }
5836                 }
5837                 ring = &cpr->cp_ring_struct;
5838                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5839                         hwrm_ring_free_send_msg(bp, ring, type,
5840                                                 INVALID_HW_RING_ID);
5841                         ring->fw_ring_id = INVALID_HW_RING_ID;
5842                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5843                 }
5844         }
5845 }
5846
5847 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5848                            bool shared);
5849
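/* Query the firmware for the ring and related resources currently reserved
 * for this function and cache them in bp->hw_resc.  On P5 chips the RX/TX
 * counts are trimmed to fit within the reserved completion rings.
 */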
5850 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5851 {
5852         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5853         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5854         struct hwrm_func_qcfg_input req = {0};
5855         int rc;
5856
5857         if (bp->hwrm_spec_code < 0x10601)
5858                 return 0;
5859
5860         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5861         req.fid = cpu_to_le16(0xffff);
5862         mutex_lock(&bp->hwrm_cmd_lock);
5863         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5864         if (rc) {
5865                 mutex_unlock(&bp->hwrm_cmd_lock);
5866                 return rc;
5867         }
5868
5869         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5870         if (BNXT_NEW_RM(bp)) {
5871                 u16 cp, stats;
5872
5873                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5874                 hw_resc->resv_hw_ring_grps =
5875                         le32_to_cpu(resp->alloc_hw_ring_grps);
5876                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5877                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5878                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5879                 hw_resc->resv_irqs = cp;
5880                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5881                         int rx = hw_resc->resv_rx_rings;
5882                         int tx = hw_resc->resv_tx_rings;
5883
5884                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5885                                 rx >>= 1;
5886                         if (cp < (rx + tx)) {
5887                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5888                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5889                                         rx <<= 1;
5890                                 hw_resc->resv_rx_rings = rx;
5891                                 hw_resc->resv_tx_rings = tx;
5892                         }
5893                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5894                         hw_resc->resv_hw_ring_grps = rx;
5895                 }
5896                 hw_resc->resv_cp_rings = cp;
5897                 hw_resc->resv_stat_ctxs = stats;
5898         }
5899         mutex_unlock(&bp->hwrm_cmd_lock);
5900         return 0;
5901 }
5902
5903 /* Caller must hold bp->hwrm_cmd_lock */
5904 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5905 {
5906         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5907         struct hwrm_func_qcfg_input req = {0};
5908         int rc;
5909
5910         if (bp->hwrm_spec_code < 0x10601)
5911                 return 0;
5912
5913         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5914         req.fid = cpu_to_le16(fid);
5915         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5916         if (!rc)
5917                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5918
5919         return rc;
5920 }
5921
5922 static bool bnxt_rfs_supported(struct bnxt *bp);
5923
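/* Build a HWRM_FUNC_CFG request that reserves the given number of TX/RX
 * rings, ring groups, completion rings, stat contexts and VNICs for the PF.
 * Only the enables bits for non-zero counts are set, so a zero count leaves
 * that resource unchanged.
 */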
5924 static void
5925 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5926                              int tx_rings, int rx_rings, int ring_grps,
5927                              int cp_rings, int stats, int vnics)
5928 {
5929         u32 enables = 0;
5930
5931         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5932         req->fid = cpu_to_le16(0xffff);
5933         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5934         req->num_tx_rings = cpu_to_le16(tx_rings);
5935         if (BNXT_NEW_RM(bp)) {
5936                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5937                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5938                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5939                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5940                         enables |= tx_rings + ring_grps ?
5941                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5942                         enables |= rx_rings ?
5943                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5944                 } else {
5945                         enables |= cp_rings ?
5946                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5947                         enables |= ring_grps ?
5948                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5949                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5950                 }
5951                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5952
5953                 req->num_rx_rings = cpu_to_le16(rx_rings);
5954                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5955                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5956                         req->num_msix = cpu_to_le16(cp_rings);
5957                         req->num_rsscos_ctxs =
5958                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5959                 } else {
5960                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5961                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5962                         req->num_rsscos_ctxs = cpu_to_le16(1);
5963                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5964                             bnxt_rfs_supported(bp))
5965                                 req->num_rsscos_ctxs =
5966                                         cpu_to_le16(ring_grps + 1);
5967                 }
5968                 req->num_stat_ctxs = cpu_to_le16(stats);
5969                 req->num_vnics = cpu_to_le16(vnics);
5970         }
5971         req->enables = cpu_to_le32(enables);
5972 }
5973
5974 static void
5975 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5976                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5977                              int rx_rings, int ring_grps, int cp_rings,
5978                              int stats, int vnics)
5979 {
5980         u32 enables = 0;
5981
5982         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5983         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5984         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5985                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5986         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5987         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5988                 enables |= tx_rings + ring_grps ?
5989                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5990         } else {
5991                 enables |= cp_rings ?
5992                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5993                 enables |= ring_grps ?
5994                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5995         }
5996         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5997         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5998
5999         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6000         req->num_tx_rings = cpu_to_le16(tx_rings);
6001         req->num_rx_rings = cpu_to_le16(rx_rings);
6002         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6003                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6004                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6005         } else {
6006                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6007                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6008                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6009         }
6010         req->num_stat_ctxs = cpu_to_le16(stats);
6011         req->num_vnics = cpu_to_le16(vnics);
6012
6013         req->enables = cpu_to_le32(enables);
6014 }
6015
6016 static int
6017 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6018                            int ring_grps, int cp_rings, int stats, int vnics)
6019 {
6020         struct hwrm_func_cfg_input req = {0};
6021         int rc;
6022
6023         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6024                                      cp_rings, stats, vnics);
6025         if (!req.enables)
6026                 return 0;
6027
6028         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6029         if (rc)
6030                 return rc;
6031
6032         if (bp->hwrm_spec_code < 0x10601)
6033                 bp->hw_resc.resv_tx_rings = tx_rings;
6034
6035         return bnxt_hwrm_get_rings(bp);
6036 }
6037
6038 static int
6039 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6040                            int ring_grps, int cp_rings, int stats, int vnics)
6041 {
6042         struct hwrm_func_vf_cfg_input req = {0};
6043         int rc;
6044
6045         if (!BNXT_NEW_RM(bp)) {
6046                 bp->hw_resc.resv_tx_rings = tx_rings;
6047                 return 0;
6048         }
6049
6050         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6051                                      cp_rings, stats, vnics);
6052         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6053         if (rc)
6054                 return rc;
6055
6056         return bnxt_hwrm_get_rings(bp);
6057 }
6058
6059 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6060                                    int cp, int stat, int vnic)
6061 {
6062         if (BNXT_PF(bp))
6063                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6064                                                   vnic);
6065         else
6066                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6067                                                   vnic);
6068 }
6069
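/* Number of NQ/completion vectors in use, including any MSIX vectors set
 * aside for ULP (e.g. RDMA) use.
 */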
6070 int bnxt_nq_rings_in_use(struct bnxt *bp)
6071 {
6072         int cp = bp->cp_nr_rings;
6073         int ulp_msix, ulp_base;
6074
6075         ulp_msix = bnxt_get_ulp_msix_num(bp);
6076         if (ulp_msix) {
6077                 ulp_base = bnxt_get_ulp_msix_base(bp);
6078                 cp += ulp_msix;
6079                 if ((ulp_base + ulp_msix) > cp)
6080                         cp = ulp_base + ulp_msix;
6081         }
6082         return cp;
6083 }
6084
6085 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6086 {
6087         int cp;
6088
6089         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6090                 return bnxt_nq_rings_in_use(bp);
6091
6092         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6093         return cp;
6094 }
6095
6096 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6097 {
6098         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6099         int cp = bp->cp_nr_rings;
6100
6101         if (!ulp_stat)
6102                 return cp;
6103
6104         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6105                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6106
6107         return cp + ulp_stat;
6108 }
6109
6110 /* Check if a default RSS map needs to be set up.  This function is only
6111  * used on older firmware that does not require reserving RX rings.
6112  */
6113 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6114 {
6115         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6116
6117         /* The RSS map is valid for RX rings set to resv_rx_rings */
6118         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6119                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6120                 if (!netif_is_rxfh_configured(bp->dev))
6121                         bnxt_set_dflt_rss_indir_tbl(bp);
6122         }
6123 }
6124
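/* Return true if the current ring configuration differs from what has been
 * reserved with the firmware, i.e. a new reservation is required.
 */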
6125 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6126 {
6127         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6128         int cp = bnxt_cp_rings_in_use(bp);
6129         int nq = bnxt_nq_rings_in_use(bp);
6130         int rx = bp->rx_nr_rings, stat;
6131         int vnic = 1, grp = rx;
6132
6133         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6134             bp->hwrm_spec_code >= 0x10601)
6135                 return true;
6136
6137         /* Old firmware does not need RX ring reservations but we still
6138          * need to set up a default RSS map when needed.  With new firmware
6139          * we go through RX ring reservations first and then set up the
6140          * RSS map for the successfully reserved RX rings when needed.
6141          */
6142         if (!BNXT_NEW_RM(bp)) {
6143                 bnxt_check_rss_tbl_no_rmgr(bp);
6144                 return false;
6145         }
6146         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6147                 vnic = rx + 1;
6148         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6149                 rx <<= 1;
6150         stat = bnxt_get_func_stat_ctxs(bp);
6151         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6152             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6153             (hw_resc->resv_hw_ring_grps != grp &&
6154              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6155                 return true;
6156         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6157             hw_resc->resv_irqs != nq)
6158                 return true;
6159         return false;
6160 }
6161
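/* Reserve rings with the firmware and trim the driver's ring counts to what
 * was actually granted.  May disable LRO/aggregation rings or revert the RSS
 * indirection table if fewer RX rings are granted than requested.
 */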
6162 static int __bnxt_reserve_rings(struct bnxt *bp)
6163 {
6164         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6165         int cp = bnxt_nq_rings_in_use(bp);
6166         int tx = bp->tx_nr_rings;
6167         int rx = bp->rx_nr_rings;
6168         int grp, rx_rings, rc;
6169         int vnic = 1, stat;
6170         bool sh = false;
6171
6172         if (!bnxt_need_reserve_rings(bp))
6173                 return 0;
6174
6175         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6176                 sh = true;
6177         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6178                 vnic = rx + 1;
6179         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6180                 rx <<= 1;
6181         grp = bp->rx_nr_rings;
6182         stat = bnxt_get_func_stat_ctxs(bp);
6183
6184         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6185         if (rc)
6186                 return rc;
6187
6188         tx = hw_resc->resv_tx_rings;
6189         if (BNXT_NEW_RM(bp)) {
6190                 rx = hw_resc->resv_rx_rings;
6191                 cp = hw_resc->resv_irqs;
6192                 grp = hw_resc->resv_hw_ring_grps;
6193                 vnic = hw_resc->resv_vnics;
6194                 stat = hw_resc->resv_stat_ctxs;
6195         }
6196
6197         rx_rings = rx;
6198         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6199                 if (rx >= 2) {
6200                         rx_rings = rx >> 1;
6201                 } else {
6202                         if (netif_running(bp->dev))
6203                                 return -ENOMEM;
6204
6205                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6206                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6207                         bp->dev->hw_features &= ~NETIF_F_LRO;
6208                         bp->dev->features &= ~NETIF_F_LRO;
6209                         bnxt_set_ring_params(bp);
6210                 }
6211         }
6212         rx_rings = min_t(int, rx_rings, grp);
6213         cp = min_t(int, cp, bp->cp_nr_rings);
6214         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6215                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6216         cp = min_t(int, cp, stat);
6217         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6218         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6219                 rx = rx_rings << 1;
6220         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6221         bp->tx_nr_rings = tx;
6222
6223         /* If we cannot reserve all the RX rings, reset the RSS map only
6224          * if absolutely necessary
6225          */
6226         if (rx_rings != bp->rx_nr_rings) {
6227                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6228                             rx_rings, bp->rx_nr_rings);
6229                 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6230                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6231                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6232                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6233                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6234                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6235                 }
6236         }
6237         bp->rx_nr_rings = rx_rings;
6238         bp->cp_nr_rings = cp;
6239
6240         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6241                 return -ENOMEM;
6242
6243         if (!netif_is_rxfh_configured(bp->dev))
6244                 bnxt_set_dflt_rss_indir_tbl(bp);
6245
6246         return rc;
6247 }
6248
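/* Ask the firmware to test, via the *_ASSETS_TEST flags, whether the
 * requested VF resources are available without actually reserving them.
 */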
6249 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6250                                     int ring_grps, int cp_rings, int stats,
6251                                     int vnics)
6252 {
6253         struct hwrm_func_vf_cfg_input req = {0};
6254         u32 flags;
6255
6256         if (!BNXT_NEW_RM(bp))
6257                 return 0;
6258
6259         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6260                                      cp_rings, stats, vnics);
6261         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6262                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6263                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6264                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6265                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6266                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6267         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6268                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6269
6270         req.flags = cpu_to_le32(flags);
6271         return hwrm_send_message_silent(bp, &req, sizeof(req),
6272                                         HWRM_CMD_TIMEOUT);
6273 }
6274
6275 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6276                                     int ring_grps, int cp_rings, int stats,
6277                                     int vnics)
6278 {
6279         struct hwrm_func_cfg_input req = {0};
6280         u32 flags;
6281
6282         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6283                                      cp_rings, stats, vnics);
6284         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6285         if (BNXT_NEW_RM(bp)) {
6286                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6287                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6288                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6289                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6290                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6291                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6292                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6293                 else
6294                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6295         }
6296
6297         req.flags = cpu_to_le32(flags);
6298         return hwrm_send_message_silent(bp, &req, sizeof(req),
6299                                         HWRM_CMD_TIMEOUT);
6300 }
6301
6302 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6303                                  int ring_grps, int cp_rings, int stats,
6304                                  int vnics)
6305 {
6306         if (bp->hwrm_spec_code < 0x10801)
6307                 return 0;
6308
6309         if (BNXT_PF(bp))
6310                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6311                                                 ring_grps, cp_rings, stats,
6312                                                 vnics);
6313
6314         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6315                                         cp_rings, stats, vnics);
6316 }
6317
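/* Query interrupt coalescing capabilities.  Legacy defaults are filled in
 * first so that firmware older than spec 0x10902, which lacks
 * HWRM_RING_AGGINT_QCAPS, still gets sane limits.
 */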
6318 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6319 {
6320         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6321         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6322         struct hwrm_ring_aggint_qcaps_input req = {0};
6323         int rc;
6324
6325         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6326         coal_cap->num_cmpl_dma_aggr_max = 63;
6327         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6328         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6329         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6330         coal_cap->int_lat_tmr_min_max = 65535;
6331         coal_cap->int_lat_tmr_max_max = 65535;
6332         coal_cap->num_cmpl_aggr_int_max = 65535;
6333         coal_cap->timer_units = 80;
6334
6335         if (bp->hwrm_spec_code < 0x10902)
6336                 return;
6337
6338         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6339         mutex_lock(&bp->hwrm_cmd_lock);
6340         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6341         if (!rc) {
6342                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6343                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6344                 coal_cap->num_cmpl_dma_aggr_max =
6345                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6346                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6347                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6348                 coal_cap->cmpl_aggr_dma_tmr_max =
6349                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6350                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6351                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6352                 coal_cap->int_lat_tmr_min_max =
6353                         le16_to_cpu(resp->int_lat_tmr_min_max);
6354                 coal_cap->int_lat_tmr_max_max =
6355                         le16_to_cpu(resp->int_lat_tmr_max_max);
6356                 coal_cap->num_cmpl_aggr_int_max =
6357                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6358                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6359         }
6360         mutex_unlock(&bp->hwrm_cmd_lock);
6361 }
6362
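/* Convert a coalescing time in microseconds to firmware timer units
 * (timer_units is the tick period in nanoseconds).
 */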
6363 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6364 {
6365         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6366
6367         return usec * 1000 / coal_cap->timer_units;
6368 }
6369
6370 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6371         struct bnxt_coal *hw_coal,
6372         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6373 {
6374         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6375         u32 cmpl_params = coal_cap->cmpl_params;
6376         u16 val, tmr, max, flags = 0;
6377
6378         max = hw_coal->bufs_per_record * 128;
6379         if (hw_coal->budget)
6380                 max = hw_coal->bufs_per_record * hw_coal->budget;
6381         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6382
6383         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6384         req->num_cmpl_aggr_int = cpu_to_le16(val);
6385
6386         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6387         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6388
6389         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6390                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6391         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6392
6393         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6394         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6395         req->int_lat_tmr_max = cpu_to_le16(tmr);
6396
6397         /* min timer set to 1/2 of interrupt timer */
6398         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6399                 val = tmr / 2;
6400                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6401                 req->int_lat_tmr_min = cpu_to_le16(val);
6402                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6403         }
6404
6405         /* buf timer set to 1/4 of interrupt timer */
6406         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6407         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6408
6409         if (cmpl_params &
6410             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6411                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6412                 val = clamp_t(u16, tmr, 1,
6413                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6414                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6415                 req->enables |=
6416                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6417         }
6418
6419         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6420                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6421         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6422             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6423                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6424         req->flags = cpu_to_le16(flags);
6425         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6426 }
6427
6428 /* Caller holds bp->hwrm_cmd_lock */
6429 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6430                                    struct bnxt_coal *hw_coal)
6431 {
6432         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6433         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6434         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6435         u32 nq_params = coal_cap->nq_params;
6436         u16 tmr;
6437
6438         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6439                 return 0;
6440
6441         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6442                                -1, -1);
6443         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6444         req.flags =
6445                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6446
6447         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6448         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6449         req.int_lat_tmr_min = cpu_to_le16(tmr);
6450         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6451         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6452 }
6453
6454 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6455 {
6456         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6457         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6458         struct bnxt_coal coal;
6459
6460         /* Tick values in microseconds.
6461          * 1 coal_buf x bufs_per_record = 1 completion record.
6462          */
6463         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6464
6465         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6466         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6467
6468         if (!bnapi->rx_ring)
6469                 return -ENODEV;
6470
6471         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6472                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6473
6474         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6475
6476         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6477
6478         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6479                                  HWRM_CMD_TIMEOUT);
6480 }
6481
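/* Program RX and TX coalescing parameters on every completion ring, and the
 * NQ coalescing timer on P5 chips.
 */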
6482 int bnxt_hwrm_set_coal(struct bnxt *bp)
6483 {
6484         int i, rc = 0;
6485         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6486                                                            req_tx = {0}, *req;
6487
6488         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6489                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6490         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6491                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6492
6493         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6494         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6495
6496         mutex_lock(&bp->hwrm_cmd_lock);
6497         for (i = 0; i < bp->cp_nr_rings; i++) {
6498                 struct bnxt_napi *bnapi = bp->bnapi[i];
6499                 struct bnxt_coal *hw_coal;
6500                 u16 ring_id;
6501
6502                 req = &req_rx;
6503                 if (!bnapi->rx_ring) {
6504                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6505                         req = &req_tx;
6506                 } else {
6507                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6508                 }
6509                 req->ring_id = cpu_to_le16(ring_id);
6510
6511                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6512                                         HWRM_CMD_TIMEOUT);
6513                 if (rc)
6514                         break;
6515
6516                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6517                         continue;
6518
6519                 if (bnapi->rx_ring && bnapi->tx_ring) {
6520                         req = &req_tx;
6521                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6522                         req->ring_id = cpu_to_le16(ring_id);
6523                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6524                                                 HWRM_CMD_TIMEOUT);
6525                         if (rc)
6526                                 break;
6527                 }
6528                 if (bnapi->rx_ring)
6529                         hw_coal = &bp->rx_coal;
6530                 else
6531                         hw_coal = &bp->tx_coal;
6532                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6533         }
6534         mutex_unlock(&bp->hwrm_cmd_lock);
6535         return rc;
6536 }
6537
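/* Free all statistics contexts.  On firmware major version <= 20 the stats
 * are explicitly cleared with HWRM_STAT_CTX_CLR_STATS before being freed.
 * Not used on Nitro A0 chips.
 */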
6538 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6539 {
6540         struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6541         struct hwrm_stat_ctx_free_input req = {0};
6542         int i;
6543
6544         if (!bp->bnapi)
6545                 return;
6546
6547         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6548                 return;
6549
6550         bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6551         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6552
6553         mutex_lock(&bp->hwrm_cmd_lock);
6554         for (i = 0; i < bp->cp_nr_rings; i++) {
6555                 struct bnxt_napi *bnapi = bp->bnapi[i];
6556                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6557
6558                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6559                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6560                         if (BNXT_FW_MAJ(bp) <= 20) {
6561                                 req0.stat_ctx_id = req.stat_ctx_id;
6562                                 _hwrm_send_message(bp, &req0, sizeof(req0),
6563                                                    HWRM_CMD_TIMEOUT);
6564                         }
6565                         _hwrm_send_message(bp, &req, sizeof(req),
6566                                            HWRM_CMD_TIMEOUT);
6567
6568                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6569                 }
6570         }
6571         mutex_unlock(&bp->hwrm_cmd_lock);
6572 }
6573
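/* Allocate a statistics context for each completion ring and record the
 * firmware stats context ID in the ring group info.
 */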
6574 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6575 {
6576         int rc = 0, i;
6577         struct hwrm_stat_ctx_alloc_input req = {0};
6578         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6579
6580         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6581                 return 0;
6582
6583         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6584
6585         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6586         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6587
6588         mutex_lock(&bp->hwrm_cmd_lock);
6589         for (i = 0; i < bp->cp_nr_rings; i++) {
6590                 struct bnxt_napi *bnapi = bp->bnapi[i];
6591                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6592
6593                 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6594
6595                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6596                                         HWRM_CMD_TIMEOUT);
6597                 if (rc)
6598                         break;
6599
6600                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6601
6602                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6603         }
6604         mutex_unlock(&bp->hwrm_cmd_lock);
6605         return rc;
6606 }
6607
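/* Query the function configuration: VF VLAN, firmware LLDP/DCBX agent
 * capabilities, bridge mode, maximum configured MTU and the L2 doorbell
 * BAR size.
 */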
6608 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6609 {
6610         struct hwrm_func_qcfg_input req = {0};
6611         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6612         u32 min_db_offset = 0;
6613         u16 flags;
6614         int rc;
6615
6616         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6617         req.fid = cpu_to_le16(0xffff);
6618         mutex_lock(&bp->hwrm_cmd_lock);
6619         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6620         if (rc)
6621                 goto func_qcfg_exit;
6622
6623 #ifdef CONFIG_BNXT_SRIOV
6624         if (BNXT_VF(bp)) {
6625                 struct bnxt_vf_info *vf = &bp->vf;
6626
6627                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6628         } else {
6629                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6630         }
6631 #endif
6632         flags = le16_to_cpu(resp->flags);
6633         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6634                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6635                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6636                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6637                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6638         }
6639         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6640                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6641
6642         switch (resp->port_partition_type) {
6643         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6644         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6645         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6646                 bp->port_partition_type = resp->port_partition_type;
6647                 break;
6648         }
6649         if (bp->hwrm_spec_code < 0x10707 ||
6650             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6651                 bp->br_mode = BRIDGE_MODE_VEB;
6652         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6653                 bp->br_mode = BRIDGE_MODE_VEPA;
6654         else
6655                 bp->br_mode = BRIDGE_MODE_UNDEF;
6656
6657         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6658         if (!bp->max_mtu)
6659                 bp->max_mtu = BNXT_MAX_MTU;
6660
6661         if (bp->db_size)
6662                 goto func_qcfg_exit;
6663
6664         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6665                 if (BNXT_PF(bp))
6666                         min_db_offset = DB_PF_OFFSET_P5;
6667                 else
6668                         min_db_offset = DB_VF_OFFSET_P5;
6669         }
6670         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6671                                  1024);
6672         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6673             bp->db_size <= min_db_offset)
6674                 bp->db_size = pci_resource_len(bp->pdev, 2);
6675
6676 func_qcfg_exit:
6677         mutex_unlock(&bp->hwrm_cmd_lock);
6678         return rc;
6679 }
6680
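/* Query backing store (context memory) requirements from the firmware and
 * allocate the bnxt_ctx_mem_info structure describing them.  Only done once,
 * on the PF, with firmware spec 0x10902 or newer.
 */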
6681 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6682 {
6683         struct hwrm_func_backing_store_qcaps_input req = {0};
6684         struct hwrm_func_backing_store_qcaps_output *resp =
6685                 bp->hwrm_cmd_resp_addr;
6686         int rc;
6687
6688         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6689                 return 0;
6690
6691         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6692         mutex_lock(&bp->hwrm_cmd_lock);
6693         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6694         if (!rc) {
6695                 struct bnxt_ctx_pg_info *ctx_pg;
6696                 struct bnxt_ctx_mem_info *ctx;
6697                 int i, tqm_rings;
6698
6699                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6700                 if (!ctx) {
6701                         rc = -ENOMEM;
6702                         goto ctx_err;
6703                 }
6704                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6705                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6706                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6707                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6708                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6709                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6710                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6711                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6712                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6713                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6714                 ctx->vnic_max_vnic_entries =
6715                         le16_to_cpu(resp->vnic_max_vnic_entries);
6716                 ctx->vnic_max_ring_table_entries =
6717                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6718                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6719                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6720                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6721                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6722                 ctx->tqm_min_entries_per_ring =
6723                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6724                 ctx->tqm_max_entries_per_ring =
6725                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6726                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6727                 if (!ctx->tqm_entries_multiple)
6728                         ctx->tqm_entries_multiple = 1;
6729                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6730                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6731                 ctx->mrav_num_entries_units =
6732                         le16_to_cpu(resp->mrav_num_entries_units);
6733                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6734                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6735                 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6736                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6737                 if (!ctx->tqm_fp_rings_count)
6738                         ctx->tqm_fp_rings_count = bp->max_q;
6739
6740                 tqm_rings = ctx->tqm_fp_rings_count + 1;
6741                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6742                 if (!ctx_pg) {
6743                         kfree(ctx);
6744                         rc = -ENOMEM;
6745                         goto ctx_err;
6746                 }
6747                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6748                         ctx->tqm_mem[i] = ctx_pg;
6749                 bp->ctx = ctx;
6750         } else {
6751                 rc = 0;
6752         }
6753 ctx_err:
6754         mutex_unlock(&bp->hwrm_cmd_lock);
6755         return rc;
6756 }
6757
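     /* Encode the backing store page size (4K/8K/64K) and page table depth
      * into *pg_attr, and point *pg_dir at either the page table (depth 1 or
      * 2) or the single data page (depth 0).
      */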
6758 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6759                                   __le64 *pg_dir)
6760 {
6761         u8 pg_size = 0;
6762
6763         if (BNXT_PAGE_SHIFT == 13)
6764                 pg_size = 1 << 4;
6765                 else if (BNXT_PAGE_SHIFT == 16)
6766                 pg_size = 2 << 4;
6767
6768         *pg_attr = pg_size;
6769         if (rmem->depth >= 1) {
6770                 if (rmem->depth == 2)
6771                         *pg_attr |= 2;
6772                 else
6773                         *pg_attr |= 1;
6774                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6775         } else {
6776                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6777         }
6778 }
6779
6780 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6781         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6782          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6783          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6784          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6785          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6786
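     /* Pass the host addresses and sizes of the allocated context memory
      * types (QP, SRQ, CQ, VNIC, STAT, MRAV, TIM and the TQM rings) to the
      * firmware with HWRM_FUNC_BACKING_STORE_CFG.  Only the types selected
      * in @enables are configured.
      */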
6787 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6788 {
6789         struct hwrm_func_backing_store_cfg_input req = {0};
6790         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6791         struct bnxt_ctx_pg_info *ctx_pg;
6792         __le32 *num_entries;
6793         __le64 *pg_dir;
6794         u32 flags = 0;
6795         u8 *pg_attr;
6796         u32 ena;
6797         int i;
6798
6799         if (!ctx)
6800                 return 0;
6801
6802         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6803         req.enables = cpu_to_le32(enables);
6804
6805         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6806                 ctx_pg = &ctx->qp_mem;
6807                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6808                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6809                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6810                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6811                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6812                                       &req.qpc_pg_size_qpc_lvl,
6813                                       &req.qpc_page_dir);
6814         }
6815         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6816                 ctx_pg = &ctx->srq_mem;
6817                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6818                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6819                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6820                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6821                                       &req.srq_pg_size_srq_lvl,
6822                                       &req.srq_page_dir);
6823         }
6824         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6825                 ctx_pg = &ctx->cq_mem;
6826                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6827                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6828                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6829                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6830                                       &req.cq_page_dir);
6831         }
6832         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6833                 ctx_pg = &ctx->vnic_mem;
6834                 req.vnic_num_vnic_entries =
6835                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6836                 req.vnic_num_ring_table_entries =
6837                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6838                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6839                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6840                                       &req.vnic_pg_size_vnic_lvl,
6841                                       &req.vnic_page_dir);
6842         }
6843         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6844                 ctx_pg = &ctx->stat_mem;
6845                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6846                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6847                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6848                                       &req.stat_pg_size_stat_lvl,
6849                                       &req.stat_page_dir);
6850         }
6851         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6852                 ctx_pg = &ctx->mrav_mem;
6853                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6854                 if (ctx->mrav_num_entries_units)
6855                         flags |=
6856                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6857                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6858                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6859                                       &req.mrav_pg_size_mrav_lvl,
6860                                       &req.mrav_page_dir);
6861         }
6862         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6863                 ctx_pg = &ctx->tim_mem;
6864                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6865                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6866                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6867                                       &req.tim_pg_size_tim_lvl,
6868                                       &req.tim_page_dir);
6869         }
6870         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6871              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6872              pg_dir = &req.tqm_sp_page_dir,
6873              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6874              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6875                 if (!(enables & ena))
6876                         continue;
6877
6878                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6879                 ctx_pg = ctx->tqm_mem[i];
6880                 *num_entries = cpu_to_le32(ctx_pg->entries);
6881                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6882         }
6883         req.flags = cpu_to_le32(flags);
6884         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6885 }
6886
6887 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6888                                   struct bnxt_ctx_pg_info *ctx_pg)
6889 {
6890         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6891
6892         rmem->page_size = BNXT_PAGE_SIZE;
6893         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6894         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6895         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6896         if (rmem->depth >= 1)
6897                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6898         return bnxt_alloc_ring(bp, rmem);
6899 }
6900
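     /* Allocate @mem_size bytes of context memory in BNXT_PAGE_SIZE chunks.
      * Small regions use a flat page array (depth 0 or 1); regions larger
      * than MAX_CTX_PAGES pages use a two-level layout with up to
      * MAX_CTX_PAGES second-level page tables.
      */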
6901 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6902                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6903                                   u8 depth, bool use_init_val)
6904 {
6905         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6906         int rc;
6907
6908         if (!mem_size)
6909                 return -EINVAL;
6910
6911         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6912         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6913                 ctx_pg->nr_pages = 0;
6914                 return -EINVAL;
6915         }
6916         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6917                 int nr_tbls, i;
6918
6919                 rmem->depth = 2;
6920                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6921                                              GFP_KERNEL);
6922                 if (!ctx_pg->ctx_pg_tbl)
6923                         return -ENOMEM;
6924                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6925                 rmem->nr_pages = nr_tbls;
6926                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6927                 if (rc)
6928                         return rc;
6929                 for (i = 0; i < nr_tbls; i++) {
6930                         struct bnxt_ctx_pg_info *pg_tbl;
6931
6932                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6933                         if (!pg_tbl)
6934                                 return -ENOMEM;
6935                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6936                         rmem = &pg_tbl->ring_mem;
6937                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6938                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6939                         rmem->depth = 1;
6940                         rmem->nr_pages = MAX_CTX_PAGES;
6941                         if (use_init_val)
6942                                 rmem->init_val = bp->ctx->ctx_kind_initializer;
6943                         if (i == (nr_tbls - 1)) {
6944                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6945
6946                                 if (rem)
6947                                         rmem->nr_pages = rem;
6948                         }
6949                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6950                         if (rc)
6951                                 break;
6952                 }
6953         } else {
6954                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6955                 if (rmem->nr_pages > 1 || depth)
6956                         rmem->depth = 1;
6957                 if (use_init_val)
6958                         rmem->init_val = bp->ctx->ctx_kind_initializer;
6959                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6960         }
6961         return rc;
6962 }
6963
6964 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6965                                   struct bnxt_ctx_pg_info *ctx_pg)
6966 {
6967         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6968
6969         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6970             ctx_pg->ctx_pg_tbl) {
6971                 int i, nr_tbls = rmem->nr_pages;
6972
6973                 for (i = 0; i < nr_tbls; i++) {
6974                         struct bnxt_ctx_pg_info *pg_tbl;
6975                         struct bnxt_ring_mem_info *rmem2;
6976
6977                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6978                         if (!pg_tbl)
6979                                 continue;
6980                         rmem2 = &pg_tbl->ring_mem;
6981                         bnxt_free_ring(bp, rmem2);
6982                         ctx_pg->ctx_pg_arr[i] = NULL;
6983                         kfree(pg_tbl);
6984                         ctx_pg->ctx_pg_tbl[i] = NULL;
6985                 }
6986                 kfree(ctx_pg->ctx_pg_tbl);
6987                 ctx_pg->ctx_pg_tbl = NULL;
6988         }
6989         bnxt_free_ring(bp, rmem);
6990         ctx_pg->nr_pages = 0;
6991 }
6992
6993 static void bnxt_free_ctx_mem(struct bnxt *bp)
6994 {
6995         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6996         int i;
6997
6998         if (!ctx)
6999                 return;
7000
7001         if (ctx->tqm_mem[0]) {
7002                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7003                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7004                 kfree(ctx->tqm_mem[0]);
7005                 ctx->tqm_mem[0] = NULL;
7006         }
7007
7008         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7009         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7010         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7011         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7012         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7013         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7014         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7015         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7016 }
7017
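     /* Size, allocate and configure all firmware backing store memory.
      * L2-only entry counts come from the QCAPS response; when RoCE is
      * supported (and this is not a kdump kernel), extra QP/SRQ/MRAV/TIM
      * entries are added before the layout is sent to the firmware with
      * bnxt_hwrm_func_backing_store_cfg().
      */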
7018 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7019 {
7020         struct bnxt_ctx_pg_info *ctx_pg;
7021         struct bnxt_ctx_mem_info *ctx;
7022         u32 mem_size, ena, entries;
7023         u32 entries_sp, min;
7024         u32 num_mr, num_ah;
7025         u32 extra_srqs = 0;
7026         u32 extra_qps = 0;
7027         u8 pg_lvl = 1;
7028         int i, rc;
7029
7030         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7031         if (rc) {
7032                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7033                            rc);
7034                 return rc;
7035         }
7036         ctx = bp->ctx;
7037         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7038                 return 0;
7039
7040         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7041                 pg_lvl = 2;
7042                 extra_qps = 65536;
7043                 extra_srqs = 8192;
7044         }
7045
7046         ctx_pg = &ctx->qp_mem;
7047         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7048                           extra_qps;
7049         mem_size = ctx->qp_entry_size * ctx_pg->entries;
7050         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7051         if (rc)
7052                 return rc;
7053
7054         ctx_pg = &ctx->srq_mem;
7055         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7056         mem_size = ctx->srq_entry_size * ctx_pg->entries;
7057         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7058         if (rc)
7059                 return rc;
7060
7061         ctx_pg = &ctx->cq_mem;
7062         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7063         mem_size = ctx->cq_entry_size * ctx_pg->entries;
7064         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7065         if (rc)
7066                 return rc;
7067
7068         ctx_pg = &ctx->vnic_mem;
7069         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7070                           ctx->vnic_max_ring_table_entries;
7071         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7072         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7073         if (rc)
7074                 return rc;
7075
7076         ctx_pg = &ctx->stat_mem;
7077         ctx_pg->entries = ctx->stat_max_entries;
7078         mem_size = ctx->stat_entry_size * ctx_pg->entries;
7079         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7080         if (rc)
7081                 return rc;
7082
7083         ena = 0;
7084         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7085                 goto skip_rdma;
7086
7087         ctx_pg = &ctx->mrav_mem;
7088         /* 128K extra is needed to accommodate static AH context
7089          * allocation by f/w.
7090          */
7091         num_mr = 1024 * 256;
7092         num_ah = 1024 * 128;
7093         ctx_pg->entries = num_mr + num_ah;
7094         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7095         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
7096         if (rc)
7097                 return rc;
7098         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7099         if (ctx->mrav_num_entries_units)
7100                 ctx_pg->entries =
7101                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7102                          (num_ah / ctx->mrav_num_entries_units);
7103
7104         ctx_pg = &ctx->tim_mem;
7105         ctx_pg->entries = ctx->qp_mem.entries;
7106         mem_size = ctx->tim_entry_size * ctx_pg->entries;
7107         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7108         if (rc)
7109                 return rc;
7110         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7111
7112 skip_rdma:
7113         min = ctx->tqm_min_entries_per_ring;
7114         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7115                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7116         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7117         entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
7118         entries = roundup(entries, ctx->tqm_entries_multiple);
7119         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7120         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7121                 ctx_pg = ctx->tqm_mem[i];
7122                 ctx_pg->entries = i ? entries : entries_sp;
7123                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7124                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7125                 if (rc)
7126                         return rc;
7127                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7128         }
7129         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7130         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7131         if (rc) {
7132                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7133                            rc);
7134                 return rc;
7135         }
7136         ctx->flags |= BNXT_CTX_FLAG_INITED;
7137         return 0;
7138 }
7139
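     /* Query min/max resource counts (rings, stat contexts, VNICs, etc.)
      * with HWRM_FUNC_RESOURCE_QCAPS.  With @all false, only the TX
      * scheduler input limit is recorded.
      */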
7140 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7141 {
7142         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7143         struct hwrm_func_resource_qcaps_input req = {0};
7144         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7145         int rc;
7146
7147         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7148         req.fid = cpu_to_le16(0xffff);
7149
7150         mutex_lock(&bp->hwrm_cmd_lock);
7151         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7152                                        HWRM_CMD_TIMEOUT);
7153         if (rc)
7154                 goto hwrm_func_resc_qcaps_exit;
7155
7156         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7157         if (!all)
7158                 goto hwrm_func_resc_qcaps_exit;
7159
7160         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7161         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7162         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7163         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7164         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7165         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7166         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7167         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7168         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7169         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7170         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7171         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7172         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7173         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7174         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7175         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7176
7177         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7178                 u16 max_msix = le16_to_cpu(resp->max_msix);
7179
7180                 hw_resc->max_nqs = max_msix;
7181                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7182         }
7183
7184         if (BNXT_PF(bp)) {
7185                 struct bnxt_pf_info *pf = &bp->pf;
7186
7187                 pf->vf_resv_strategy =
7188                         le16_to_cpu(resp->vf_reservation_strategy);
7189                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7190                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7191         }
7192 hwrm_func_resc_qcaps_exit:
7193         mutex_unlock(&bp->hwrm_cmd_lock);
7194         return rc;
7195 }
7196
7197 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7198 {
7199         int rc = 0;
7200         struct hwrm_func_qcaps_input req = {0};
7201         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7202         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7203         u32 flags, flags_ext;
7204
7205         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7206         req.fid = cpu_to_le16(0xffff);
7207
7208         mutex_lock(&bp->hwrm_cmd_lock);
7209         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7210         if (rc)
7211                 goto hwrm_func_qcaps_exit;
7212
7213         flags = le32_to_cpu(resp->flags);
7214         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7215                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7216         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7217                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7218         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7219                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7220         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7221                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7222         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7223                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7224         if (flags &  FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7225                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7226         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7227                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7228         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7229                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7230
7231         flags_ext = le32_to_cpu(resp->flags_ext);
7232         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7233                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7234
7235         bp->tx_push_thresh = 0;
7236         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7237             BNXT_FW_MAJ(bp) > 217)
7238                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7239
7240         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7241         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7242         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7243         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7244         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7245         if (!hw_resc->max_hw_ring_grps)
7246                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7247         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7248         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7249         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7250
7251         if (BNXT_PF(bp)) {
7252                 struct bnxt_pf_info *pf = &bp->pf;
7253
7254                 pf->fw_fid = le16_to_cpu(resp->fid);
7255                 pf->port_id = le16_to_cpu(resp->port_id);
7256                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7257                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7258                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7259                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7260                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7261                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7262                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7263                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7264                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7265                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7266                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7267                         bp->flags |= BNXT_FLAG_WOL_CAP;
7268         } else {
7269 #ifdef CONFIG_BNXT_SRIOV
7270                 struct bnxt_vf_info *vf = &bp->vf;
7271
7272                 vf->fw_fid = le16_to_cpu(resp->fid);
7273                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7274 #endif
7275         }
7276
7277 hwrm_func_qcaps_exit:
7278         mutex_unlock(&bp->hwrm_cmd_lock);
7279         return rc;
7280 }
7281
7282 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7283
7284 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7285 {
7286         int rc;
7287
7288         rc = __bnxt_hwrm_func_qcaps(bp);
7289         if (rc)
7290                 return rc;
7291         rc = bnxt_hwrm_queue_qportcfg(bp);
7292         if (rc) {
7293                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7294                 return rc;
7295         }
7296         if (bp->hwrm_spec_code >= 0x10803) {
7297                 rc = bnxt_alloc_ctx_mem(bp);
7298                 if (rc)
7299                         return rc;
7300                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7301                 if (!rc)
7302                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7303         }
7304         return 0;
7305 }
7306
7307 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7308 {
7309         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7310         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7311         int rc = 0;
7312         u32 flags;
7313
7314         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7315                 return 0;
7316
7317         resp = bp->hwrm_cmd_resp_addr;
7318         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7319
7320         mutex_lock(&bp->hwrm_cmd_lock);
7321         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7322         if (rc)
7323                 goto hwrm_cfa_adv_qcaps_exit;
7324
7325         flags = le32_to_cpu(resp->flags);
7326         if (flags &
7327             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7328                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7329
7330 hwrm_cfa_adv_qcaps_exit:
7331         mutex_unlock(&bp->hwrm_cmd_lock);
7332         return rc;
7333 }
7334
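     /* Map any GRC-based firmware health registers through a dedicated GRC
      * window so they can be polled with plain MMIO reads.  All such
      * registers must share the same base (BNXT_GRC_BASE_MASK); otherwise
      * -ERANGE is returned.
      */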
7335 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7336 {
7337         struct bnxt_fw_health *fw_health = bp->fw_health;
7338         u32 reg_base = 0xffffffff;
7339         int i;
7340
7341         /* Only pre-map the monitoring GRC registers using window 3 */
7342         for (i = 0; i < 4; i++) {
7343                 u32 reg = fw_health->regs[i];
7344
7345                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7346                         continue;
7347                 if (reg_base == 0xffffffff)
7348                         reg_base = reg & BNXT_GRC_BASE_MASK;
7349                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7350                         return -ERANGE;
7351                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7352                                             (reg & BNXT_GRC_OFFSET_MASK);
7353         }
7354         if (reg_base == 0xffffffff)
7355                 return 0;
7356
7357         writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7358                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7359         return 0;
7360 }
7361
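     /* Read the firmware error recovery parameters (polling intervals, wait
      * times, health/heartbeat/reset registers and the reset sequence) into
      * bp->fw_health.  On any failure, the BNXT_FW_CAP_ERROR_RECOVERY
      * capability is cleared.
      */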
7362 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7363 {
7364         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7365         struct bnxt_fw_health *fw_health = bp->fw_health;
7366         struct hwrm_error_recovery_qcfg_input req = {0};
7367         int rc, i;
7368
7369         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7370                 return 0;
7371
7372         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7373         mutex_lock(&bp->hwrm_cmd_lock);
7374         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7375         if (rc)
7376                 goto err_recovery_out;
7377         fw_health->flags = le32_to_cpu(resp->flags);
7378         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7379             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7380                 rc = -EINVAL;
7381                 goto err_recovery_out;
7382         }
7383         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7384         fw_health->master_func_wait_dsecs =
7385                 le32_to_cpu(resp->master_func_wait_period);
7386         fw_health->normal_func_wait_dsecs =
7387                 le32_to_cpu(resp->normal_func_wait_period);
7388         fw_health->post_reset_wait_dsecs =
7389                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7390         fw_health->post_reset_max_wait_dsecs =
7391                 le32_to_cpu(resp->max_bailout_time_after_reset);
7392         fw_health->regs[BNXT_FW_HEALTH_REG] =
7393                 le32_to_cpu(resp->fw_health_status_reg);
7394         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7395                 le32_to_cpu(resp->fw_heartbeat_reg);
7396         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7397                 le32_to_cpu(resp->fw_reset_cnt_reg);
7398         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7399                 le32_to_cpu(resp->reset_inprogress_reg);
7400         fw_health->fw_reset_inprog_reg_mask =
7401                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7402         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7403         if (fw_health->fw_reset_seq_cnt >= 16) {
7404                 rc = -EINVAL;
7405                 goto err_recovery_out;
7406         }
7407         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7408                 fw_health->fw_reset_seq_regs[i] =
7409                         le32_to_cpu(resp->reset_reg[i]);
7410                 fw_health->fw_reset_seq_vals[i] =
7411                         le32_to_cpu(resp->reset_reg_val[i]);
7412                 fw_health->fw_reset_seq_delay_msec[i] =
7413                         resp->delay_after_reset[i];
7414         }
7415 err_recovery_out:
7416         mutex_unlock(&bp->hwrm_cmd_lock);
7417         if (!rc)
7418                 rc = bnxt_map_fw_health_regs(bp);
7419         if (rc)
7420                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7421         return rc;
7422 }
7423
7424 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7425 {
7426         struct hwrm_func_reset_input req = {0};
7427
7428         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7429         req.enables = 0;
7430
7431         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7432 }
7433
7434 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7435 {
7436         int rc = 0;
7437         struct hwrm_queue_qportcfg_input req = {0};
7438         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7439         u8 i, j, *qptr;
7440         bool no_rdma;
7441
7442         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7443
7444         mutex_lock(&bp->hwrm_cmd_lock);
7445         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7446         if (rc)
7447                 goto qportcfg_exit;
7448
7449         if (!resp->max_configurable_queues) {
7450                 rc = -EINVAL;
7451                 goto qportcfg_exit;
7452         }
7453         bp->max_tc = resp->max_configurable_queues;
7454         bp->max_lltc = resp->max_configurable_lossless_queues;
7455         if (bp->max_tc > BNXT_MAX_QUEUE)
7456                 bp->max_tc = BNXT_MAX_QUEUE;
7457
7458         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7459         qptr = &resp->queue_id0;
7460         for (i = 0, j = 0; i < bp->max_tc; i++) {
7461                 bp->q_info[j].queue_id = *qptr;
7462                 bp->q_ids[i] = *qptr++;
7463                 bp->q_info[j].queue_profile = *qptr++;
7464                 bp->tc_to_qidx[j] = j;
7465                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7466                     (no_rdma && BNXT_PF(bp)))
7467                         j++;
7468         }
7469         bp->max_q = bp->max_tc;
7470         bp->max_tc = max_t(u8, j, 1);
7471
7472         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7473                 bp->max_tc = 1;
7474
7475         if (bp->max_lltc > bp->max_tc)
7476                 bp->max_lltc = bp->max_tc;
7477
7478 qportcfg_exit:
7479         mutex_unlock(&bp->hwrm_cmd_lock);
7480         return rc;
7481 }
7482
7483 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7484 {
7485         struct hwrm_ver_get_input req = {0};
7486         int rc;
7487
7488         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7489         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7490         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7491         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7492
7493         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7494                                    silent);
7495         return rc;
7496 }
7497
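     /* Query the HWRM interface and firmware versions, record the default
      * command timeout and maximum request lengths, and set capability flags
      * (short command, Kong mailbox channel, 64-bit flow handles, trusted
      * VF, advanced CFA flow mgmt) advertised in dev_caps_cfg.
      */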
7498 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7499 {
7500         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7501         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7502         u32 dev_caps_cfg, hwrm_ver;
7503         int rc, len;
7504
7505         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7506         mutex_lock(&bp->hwrm_cmd_lock);
7507         rc = __bnxt_hwrm_ver_get(bp, false);
7508         if (rc)
7509                 goto hwrm_ver_get_exit;
7510
7511         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7512
7513         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7514                              resp->hwrm_intf_min_8b << 8 |
7515                              resp->hwrm_intf_upd_8b;
7516         if (resp->hwrm_intf_maj_8b < 1) {
7517                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7518                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7519                             resp->hwrm_intf_upd_8b);
7520                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7521         }
7522
7523         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7524                         HWRM_VERSION_UPDATE;
7525
7526         if (bp->hwrm_spec_code > hwrm_ver)
7527                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7528                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7529                          HWRM_VERSION_UPDATE);
7530         else
7531                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7532                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7533                          resp->hwrm_intf_upd_8b);
7534
7535         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7536         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7537                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7538                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7539                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7540                 len = FW_VER_STR_LEN;
7541         } else {
7542                 fw_maj = resp->hwrm_fw_maj_8b;
7543                 fw_min = resp->hwrm_fw_min_8b;
7544                 fw_bld = resp->hwrm_fw_bld_8b;
7545                 fw_rsv = resp->hwrm_fw_rsvd_8b;
7546                 len = BC_HWRM_STR_LEN;
7547         }
7548         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7549         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7550                  fw_rsv);
7551
7552         if (strlen(resp->active_pkg_name)) {
7553                 int fw_ver_len = strlen(bp->fw_ver_str);
7554
7555                 snprintf(bp->fw_ver_str + fw_ver_len,
7556                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7557                          resp->active_pkg_name);
7558                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7559         }
7560
7561         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7562         if (!bp->hwrm_cmd_timeout)
7563                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7564
7565         if (resp->hwrm_intf_maj_8b >= 1) {
7566                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7567                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7568         }
7569         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7570                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7571
7572         bp->chip_num = le16_to_cpu(resp->chip_num);
7573         bp->chip_rev = resp->chip_rev;
7574         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7575             !resp->chip_metal)
7576                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7577
7578         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7579         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7580             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7581                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7582
7583         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7584                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7585
7586         if (dev_caps_cfg &
7587             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7588                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7589
7590         if (dev_caps_cfg &
7591             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7592                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7593
7594         if (dev_caps_cfg &
7595             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7596                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7597
7598 hwrm_ver_get_exit:
7599         mutex_unlock(&bp->hwrm_cmd_lock);
7600         return rc;
7601 }
7602
7603 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7604 {
7605         struct hwrm_fw_set_time_input req = {0};
7606         struct tm tm;
7607         time64_t now = ktime_get_real_seconds();
7608
7609         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7610             bp->hwrm_spec_code < 0x10400)
7611                 return -EOPNOTSUPP;
7612
7613         time64_to_tm(now, 0, &tm);
7614         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7615         req.year = cpu_to_le16(1900 + tm.tm_year);
7616         req.month = 1 + tm.tm_mon;
7617         req.day = tm.tm_mday;
7618         req.hour = tm.tm_hour;
7619         req.minute = tm.tm_min;
7620         req.second = tm.tm_sec;
7621         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7622 }
7623
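     /* Accumulate one hardware counter into its 64-bit software mirror.
      * The hardware counter is narrower than 64 bits (width given by @mask),
      * so a wrap is detected when the new hardware value is below the
      * previously recorded low bits, and mask + 1 is added to compensate.
      */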
7624 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
7625 {
7626         u64 sw_tmp;
7627
7628         sw_tmp = (*sw & ~mask) | hw;
7629         if (hw < (*sw & mask))
7630                 sw_tmp += mask + 1;
7631         WRITE_ONCE(*sw, sw_tmp);
7632 }
7633
7634 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
7635                                     int count, bool ignore_zero)
7636 {
7637         int i;
7638
7639         for (i = 0; i < count; i++) {
7640                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
7641
7642                 if (ignore_zero && !hw)
7643                         continue;
7644
7645                 if (masks[i] == -1ULL)
7646                         sw_stats[i] = hw;
7647                 else
7648                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
7649         }
7650 }
7651
7652 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
7653 {
7654         if (!stats->hw_stats)
7655                 return;
7656
7657         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7658                                 stats->hw_masks, stats->len / 8, false);
7659 }
7660
7661 static void bnxt_accumulate_all_stats(struct bnxt *bp)
7662 {
7663         struct bnxt_stats_mem *ring0_stats;
7664         bool ignore_zero = false;
7665         int i;
7666
7667         /* Chip bug.  Counter intermittently becomes 0. */
7668         if (bp->flags & BNXT_FLAG_CHIP_P5)
7669                 ignore_zero = true;
7670
7671         for (i = 0; i < bp->cp_nr_rings; i++) {
7672                 struct bnxt_napi *bnapi = bp->bnapi[i];
7673                 struct bnxt_cp_ring_info *cpr;
7674                 struct bnxt_stats_mem *stats;
7675
7676                 cpr = &bnapi->cp_ring;
7677                 stats = &cpr->stats;
7678                 if (!i)
7679                         ring0_stats = stats;
7680                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7681                                         ring0_stats->hw_masks,
7682                                         ring0_stats->len / 8, ignore_zero);
7683         }
7684         if (bp->flags & BNXT_FLAG_PORT_STATS) {
7685                 struct bnxt_stats_mem *stats = &bp->port_stats;
7686                 __le64 *hw_stats = stats->hw_stats;
7687                 u64 *sw_stats = stats->sw_stats;
7688                 u64 *masks = stats->hw_masks;
7689                 int cnt;
7690
7691                 cnt = sizeof(struct rx_port_stats) / 8;
7692                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7693
7694                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7695                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7696                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7697                 cnt = sizeof(struct tx_port_stats) / 8;
7698                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7699         }
7700         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
7701                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
7702                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
7703         }
7704 }
7705
7706 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
7707 {
7708         struct bnxt_pf_info *pf = &bp->pf;
7709         struct hwrm_port_qstats_input req = {0};
7710
7711         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7712                 return 0;
7713
7714         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7715                 return -EOPNOTSUPP;
7716
7717         req.flags = flags;
7718         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7719         req.port_id = cpu_to_le16(pf->port_id);
7720         req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
7721                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
7722         req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
7723         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7724 }
7725
7726 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
7727 {
7728         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7729         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7730         struct hwrm_port_qstats_ext_input req = {0};
7731         struct bnxt_pf_info *pf = &bp->pf;
7732         u32 tx_stat_size;
7733         int rc;
7734
7735         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7736                 return 0;
7737
7738         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7739                 return -EOPNOTSUPP;
7740
7741         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7742         req.flags = flags;
7743         req.port_id = cpu_to_le16(pf->port_id);
7744         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7745         req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
7746         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
7747                        sizeof(struct tx_port_stats_ext) : 0;
7748         req.tx_stat_size = cpu_to_le16(tx_stat_size);
7749         req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
7750         mutex_lock(&bp->hwrm_cmd_lock);
7751         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7752         if (!rc) {
7753                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7754                 bp->fw_tx_stats_ext_size = tx_stat_size ?
7755                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7756         } else {
7757                 bp->fw_rx_stats_ext_size = 0;
7758                 bp->fw_tx_stats_ext_size = 0;
7759         }
7760         if (flags)
7761                 goto qstats_done;
7762
7763         if (bp->fw_tx_stats_ext_size <=
7764             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7765                 mutex_unlock(&bp->hwrm_cmd_lock);
7766                 bp->pri2cos_valid = 0;
7767                 return rc;
7768         }
7769
7770         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7771         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7772
7773         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7774         if (!rc) {
7775                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7776                 u8 *pri2cos;
7777                 int i, j;
7778
7779                 resp2 = bp->hwrm_cmd_resp_addr;
7780                 pri2cos = &resp2->pri0_cos_queue_id;
7781                 for (i = 0; i < 8; i++) {
7782                         u8 queue_id = pri2cos[i];
7783                         u8 queue_idx;
7784
7785                         /* Per port queue IDs start from 0, 10, 20, etc */
7786                         queue_idx = queue_id % 10;
7787                         if (queue_idx > BNXT_MAX_QUEUE) {
7788                                 bp->pri2cos_valid = false;
7789                                 goto qstats_done;
7790                         }
7791                         for (j = 0; j < bp->max_q; j++) {
7792                                 if (bp->q_ids[j] == queue_id)
7793                                         bp->pri2cos_idx[i] = queue_idx;
7794                         }
7795                 }
7796                 bp->pri2cos_valid = 1;
7797         }
7798 qstats_done:
7799         mutex_unlock(&bp->hwrm_cmd_lock);
7800         return rc;
7801 }
7802
7803 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7804 {
7805         if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
7806                 bnxt_hwrm_tunnel_dst_port_free(
7807                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7808         if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
7809                 bnxt_hwrm_tunnel_dst_port_free(
7810                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7811 }
7812
7813 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7814 {
7815         int rc, i;
7816         u32 tpa_flags = 0;
7817
7818         if (set_tpa)
7819                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7820         else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7821                 return 0;
7822         for (i = 0; i < bp->nr_vnics; i++) {
7823                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7824                 if (rc) {
7825                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
7826                                    i, rc);
7827                         return rc;
7828                 }
7829         }
7830         return 0;
7831 }
7832
7833 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7834 {
7835         int i;
7836
7837         for (i = 0; i < bp->nr_vnics; i++)
7838                 bnxt_hwrm_vnic_set_rss(bp, i, false);
7839 }
7840
7841 static void bnxt_clear_vnic(struct bnxt *bp)
7842 {
7843         if (!bp->vnic_info)
7844                 return;
7845
7846         bnxt_hwrm_clear_vnic_filter(bp);
7847         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7848                 /* clear all RSS settings before freeing the vnic ctx */
7849                 bnxt_hwrm_clear_vnic_rss(bp);
7850                 bnxt_hwrm_vnic_ctx_free(bp);
7851         }
7852         /* before freeing the vnic, undo the vnic tpa settings */
7853         if (bp->flags & BNXT_FLAG_TPA)
7854                 bnxt_set_tpa(bp, false);
7855         bnxt_hwrm_vnic_free(bp);
7856         if (bp->flags & BNXT_FLAG_CHIP_P5)
7857                 bnxt_hwrm_vnic_ctx_free(bp);
7858 }
7859
7860 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7861                                     bool irq_re_init)
7862 {
7863         bnxt_clear_vnic(bp);
7864         bnxt_hwrm_ring_free(bp, close_path);
7865         bnxt_hwrm_ring_grp_free(bp);
7866         if (irq_re_init) {
7867                 bnxt_hwrm_stat_ctx_free(bp);
7868                 bnxt_hwrm_free_tunnel_ports(bp);
7869         }
7870 }
7871
7872 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7873 {
7874         struct hwrm_func_cfg_input req = {0};
7875
7876         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7877         req.fid = cpu_to_le16(0xffff);
7878         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7879         if (br_mode == BRIDGE_MODE_VEB)
7880                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7881         else if (br_mode == BRIDGE_MODE_VEPA)
7882                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7883         else
7884                 return -EINVAL;
7885         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7886 }
7887
7888 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7889 {
7890         struct hwrm_func_cfg_input req = {0};
7891
7892         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7893                 return 0;
7894
7895         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7896         req.fid = cpu_to_le16(0xffff);
7897         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7898         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7899         if (size == 128)
7900                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7901
7902         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7903 }
7904
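     /* Legacy (pre-P5) VNIC setup: allocate the RSS context(s), configure
      * the VNIC and its default ring group, program the RSS table and, when
      * aggregation rings are used, enable header-data split.
      */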
7905 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7906 {
7907         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7908         int rc;
7909
7910         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7911                 goto skip_rss_ctx;
7912
7913         /* allocate context for vnic */
7914         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7915         if (rc) {
7916                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7917                            vnic_id, rc);
7918                 goto vnic_setup_err;
7919         }
7920         bp->rsscos_nr_ctxs++;
7921
7922         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7923                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7924                 if (rc) {
7925                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7926                                    vnic_id, rc);
7927                         goto vnic_setup_err;
7928                 }
7929                 bp->rsscos_nr_ctxs++;
7930         }
7931
7932 skip_rss_ctx:
7933         /* configure default vnic, ring grp */
7934         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7935         if (rc) {
7936                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7937                            vnic_id, rc);
7938                 goto vnic_setup_err;
7939         }
7940
7941         /* Enable RSS hashing on vnic */
7942         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7943         if (rc) {
7944                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7945                            vnic_id, rc);
7946                 goto vnic_setup_err;
7947         }
7948
7949         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7950                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7951                 if (rc) {
7952                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7953                                    vnic_id, rc);
7954                 }
7955         }
7956
7957 vnic_setup_err:
7958         return rc;
7959 }
7960
7961 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7962 {
7963         int rc, i, nr_ctxs;
7964
7965         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7966         for (i = 0; i < nr_ctxs; i++) {
7967                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7968                 if (rc) {
7969                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7970                                    vnic_id, i, rc);
7971                         break;
7972                 }
7973                 bp->rsscos_nr_ctxs++;
7974         }
7975         if (i < nr_ctxs)
7976                 return -ENOMEM;
7977
7978         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7979         if (rc) {
7980                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7981                            vnic_id, rc);
7982                 return rc;
7983         }
7984         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7985         if (rc) {
7986                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7987                            vnic_id, rc);
7988                 return rc;
7989         }
7990         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7991                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7992                 if (rc) {
7993                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7994                                    vnic_id, rc);
7995                 }
7996         }
7997         return rc;
7998 }
7999
8000 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8001 {
8002         if (bp->flags & BNXT_FLAG_CHIP_P5)
8003                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8004         else
8005                 return __bnxt_setup_vnic(bp, vnic_id);
8006 }
8007
8008 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8009 {
8010 #ifdef CONFIG_RFS_ACCEL
8011         int i, rc = 0;
8012
8013         if (bp->flags & BNXT_FLAG_CHIP_P5)
8014                 return 0;
8015
8016         for (i = 0; i < bp->rx_nr_rings; i++) {
8017                 struct bnxt_vnic_info *vnic;
8018                 u16 vnic_id = i + 1;
8019                 u16 ring_id = i;
8020
8021                 if (vnic_id >= bp->nr_vnics)
8022                         break;
8023
8024                 vnic = &bp->vnic_info[vnic_id];
8025                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8026                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8027                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8028                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8029                 if (rc) {
8030                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8031                                    vnic_id, rc);
8032                         break;
8033                 }
8034                 rc = bnxt_setup_vnic(bp, vnic_id);
8035                 if (rc)
8036                         break;
8037         }
8038         return rc;
8039 #else
8040         return 0;
8041 #endif
8042 }
8043
8044 /* Allow PF and VF with default VLAN to be in promiscuous mode */
8045 static bool bnxt_promisc_ok(struct bnxt *bp)
8046 {
8047 #ifdef CONFIG_BNXT_SRIOV
8048         if (BNXT_VF(bp) && !bp->vf.vlan)
8049                 return false;
8050 #endif
8051         return true;
8052 }
8053
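/* Allocate and configure the special VNIC (id 1) backed by the last RX ring
 * on the NS2 Nitro A0 chip.
 */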
8054 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8055 {
8056         int rc = 0;
8057
8058         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8059         if (rc) {
8060                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8061                            rc);
8062                 return rc;
8063         }
8064
8065         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8066         if (rc) {
8067                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8068                            rc);
8069                 return rc;
8070         }
8071         return rc;
8072 }
8073
8074 static int bnxt_cfg_rx_mode(struct bnxt *);
8075 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8076
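/* Allocate firmware resources (stat contexts, rings, ring groups, VNICs),
 * set up the default VNIC and its MAC filter, program the RX mode, and
 * apply interrupt coalescing.
 */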
8077 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8078 {
8079         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8080         int rc = 0;
8081         unsigned int rx_nr_rings = bp->rx_nr_rings;
8082
8083         if (irq_re_init) {
8084                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8085                 if (rc) {
8086                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8087                                    rc);
8088                         goto err_out;
8089                 }
8090         }
8091
8092         rc = bnxt_hwrm_ring_alloc(bp);
8093         if (rc) {
8094                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8095                 goto err_out;
8096         }
8097
8098         rc = bnxt_hwrm_ring_grp_alloc(bp);
8099         if (rc) {
8100                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8101                 goto err_out;
8102         }
8103
8104         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8105                 rx_nr_rings--;
8106
8107         /* default vnic 0 */
8108         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8109         if (rc) {
8110                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8111                 goto err_out;
8112         }
8113
8114         rc = bnxt_setup_vnic(bp, 0);
8115         if (rc)
8116                 goto err_out;
8117
8118         if (bp->flags & BNXT_FLAG_RFS) {
8119                 rc = bnxt_alloc_rfs_vnics(bp);
8120                 if (rc)
8121                         goto err_out;
8122         }
8123
8124         if (bp->flags & BNXT_FLAG_TPA) {
8125                 rc = bnxt_set_tpa(bp, true);
8126                 if (rc)
8127                         goto err_out;
8128         }
8129
8130         if (BNXT_VF(bp))
8131                 bnxt_update_vf_mac(bp);
8132
8133         /* Filter for default vnic 0 */
8134         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8135         if (rc) {
8136                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8137                 goto err_out;
8138         }
8139         vnic->uc_filter_count = 1;
8140
8141         vnic->rx_mask = 0;
8142         if (bp->dev->flags & IFF_BROADCAST)
8143                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8144
8145         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8146                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8147
8148         if (bp->dev->flags & IFF_ALLMULTI) {
8149                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8150                 vnic->mc_list_count = 0;
8151         } else {
8152                 u32 mask = 0;
8153
8154                 bnxt_mc_list_updated(bp, &mask);
8155                 vnic->rx_mask |= mask;
8156         }
8157
8158         rc = bnxt_cfg_rx_mode(bp);
8159         if (rc)
8160                 goto err_out;
8161
8162         rc = bnxt_hwrm_set_coal(bp);
8163         if (rc)
8164                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8165                                 rc);
8166
8167         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8168                 rc = bnxt_setup_nitroa0_vnic(bp);
8169                 if (rc)
8170                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8171                                    rc);
8172         }
8173
8174         if (BNXT_VF(bp)) {
8175                 bnxt_hwrm_func_qcfg(bp);
8176                 netdev_update_features(bp->dev);
8177         }
8178
8179         return 0;
8180
8181 err_out:
8182         bnxt_hwrm_resource_free(bp, 0, true);
8183
8184         return rc;
8185 }
8186
8187 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8188 {
8189         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8190         return 0;
8191 }
8192
8193 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8194 {
8195         bnxt_init_cp_rings(bp);
8196         bnxt_init_rx_rings(bp);
8197         bnxt_init_tx_rings(bp);
8198         bnxt_init_ring_grps(bp, irq_re_init);
8199         bnxt_init_vnics(bp);
8200
8201         return bnxt_init_chip(bp, irq_re_init);
8202 }
8203
8204 static int bnxt_set_real_num_queues(struct bnxt *bp)
8205 {
8206         int rc;
8207         struct net_device *dev = bp->dev;
8208
8209         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8210                                           bp->tx_nr_rings_xdp);
8211         if (rc)
8212                 return rc;
8213
8214         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8215         if (rc)
8216                 return rc;
8217
8218 #ifdef CONFIG_RFS_ACCEL
8219         if (bp->flags & BNXT_FLAG_RFS)
8220                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8221 #endif
8222
8223         return rc;
8224 }
8225
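/* Trim the requested RX and TX ring counts so they fit within @max
 * completion rings: each count is capped at @max when rings are shared,
 * otherwise the two counts combined must not exceed @max.
 */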
8226 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8227                            bool shared)
8228 {
8229         int _rx = *rx, _tx = *tx;
8230
8231         if (shared) {
8232                 *rx = min_t(int, _rx, max);
8233                 *tx = min_t(int, _tx, max);
8234         } else {
8235                 if (max < 2)
8236                         return -ENOMEM;
8237
8238                 while (_rx + _tx > max) {
8239                         if (_rx > _tx && _rx > 1)
8240                                 _rx--;
8241                         else if (_tx > 1)
8242                                 _tx--;
8243                 }
8244                 *rx = _rx;
8245                 *tx = _tx;
8246         }
8247         return 0;
8248 }
8249
8250 static void bnxt_setup_msix(struct bnxt *bp)
8251 {
8252         const int len = sizeof(bp->irq_tbl[0].name);
8253         struct net_device *dev = bp->dev;
8254         int tcs, i;
8255
8256         tcs = netdev_get_num_tc(dev);
8257         if (tcs) {
8258                 int i, off, count;
8259
8260                 for (i = 0; i < tcs; i++) {
8261                         count = bp->tx_nr_rings_per_tc;
8262                         off = i * count;
8263                         netdev_set_tc_queue(dev, i, count, off);
8264                 }
8265         }
8266
8267         for (i = 0; i < bp->cp_nr_rings; i++) {
8268                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8269                 char *attr;
8270
8271                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8272                         attr = "TxRx";
8273                 else if (i < bp->rx_nr_rings)
8274                         attr = "rx";
8275                 else
8276                         attr = "tx";
8277
8278                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8279                          attr, i);
8280                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8281         }
8282 }
8283
8284 static void bnxt_setup_inta(struct bnxt *bp)
8285 {
8286         const int len = sizeof(bp->irq_tbl[0].name);
8287
8288         if (netdev_get_num_tc(bp->dev))
8289                 netdev_reset_tc(bp->dev);
8290
8291         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8292                  0);
8293         bp->irq_tbl[0].handler = bnxt_inta;
8294 }
8295
8296 static int bnxt_setup_int_mode(struct bnxt *bp)
8297 {
8298         int rc;
8299
8300         if (bp->flags & BNXT_FLAG_USING_MSIX)
8301                 bnxt_setup_msix(bp);
8302         else
8303                 bnxt_setup_inta(bp);
8304
8305         rc = bnxt_set_real_num_queues(bp);
8306         return rc;
8307 }
8308
8309 #ifdef CONFIG_RFS_ACCEL
8310 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8311 {
8312         return bp->hw_resc.max_rsscos_ctxs;
8313 }
8314
8315 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8316 {
8317         return bp->hw_resc.max_vnics;
8318 }
8319 #endif
8320
8321 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8322 {
8323         return bp->hw_resc.max_stat_ctxs;
8324 }
8325
8326 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8327 {
8328         return bp->hw_resc.max_cp_rings;
8329 }
8330
8331 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8332 {
8333         unsigned int cp = bp->hw_resc.max_cp_rings;
8334
8335         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8336                 cp -= bnxt_get_ulp_msix_num(bp);
8337
8338         return cp;
8339 }
8340
8341 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8342 {
8343         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8344
8345         if (bp->flags & BNXT_FLAG_CHIP_P5)
8346                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8347
8348         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8349 }
8350
8351 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8352 {
8353         bp->hw_resc.max_irqs = max_irqs;
8354 }
8355
8356 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8357 {
8358         unsigned int cp;
8359
8360         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8361         if (bp->flags & BNXT_FLAG_CHIP_P5)
8362                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8363         else
8364                 return cp - bp->cp_nr_rings;
8365 }
8366
8367 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8368 {
8369         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8370 }
8371
8372 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8373 {
8374         int max_cp = bnxt_get_max_func_cp_rings(bp);
8375         int max_irq = bnxt_get_max_func_irqs(bp);
8376         int total_req = bp->cp_nr_rings + num;
8377         int max_idx, avail_msix;
8378
8379         max_idx = bp->total_irqs;
8380         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8381                 max_idx = min_t(int, bp->total_irqs, max_cp);
8382         avail_msix = max_idx - bp->cp_nr_rings;
8383         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8384                 return avail_msix;
8385
8386         if (max_irq < total_req) {
8387                 num = max_irq - bp->cp_nr_rings;
8388                 if (num <= 0)
8389                         return 0;
8390         }
8391         return num;
8392 }
8393
8394 static int bnxt_get_num_msix(struct bnxt *bp)
8395 {
8396         if (!BNXT_NEW_RM(bp))
8397                 return bnxt_get_max_func_irqs(bp);
8398
8399         return bnxt_nq_rings_in_use(bp);
8400 }
8401
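/* Enable MSI-X, allocate the IRQ table, and trim the ring counts to match
 * the number of vectors actually granted (excluding any vectors reserved
 * for the ULP driver).
 */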
8402 static int bnxt_init_msix(struct bnxt *bp)
8403 {
8404         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8405         struct msix_entry *msix_ent;
8406
8407         total_vecs = bnxt_get_num_msix(bp);
8408         max = bnxt_get_max_func_irqs(bp);
8409         if (total_vecs > max)
8410                 total_vecs = max;
8411
8412         if (!total_vecs)
8413                 return 0;
8414
8415         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8416         if (!msix_ent)
8417                 return -ENOMEM;
8418
8419         for (i = 0; i < total_vecs; i++) {
8420                 msix_ent[i].entry = i;
8421                 msix_ent[i].vector = 0;
8422         }
8423
8424         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8425                 min = 2;
8426
8427         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8428         ulp_msix = bnxt_get_ulp_msix_num(bp);
8429         if (total_vecs < 0 || total_vecs < ulp_msix) {
8430                 rc = -ENODEV;
8431                 goto msix_setup_exit;
8432         }
8433
8434         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8435         if (bp->irq_tbl) {
8436                 for (i = 0; i < total_vecs; i++)
8437                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8438
8439                 bp->total_irqs = total_vecs;
8440                 /* Trim rings based upon num of vectors allocated */
8441                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8442                                      total_vecs - ulp_msix, min == 1);
8443                 if (rc)
8444                         goto msix_setup_exit;
8445
8446                 bp->cp_nr_rings = (min == 1) ?
8447                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8448                                   bp->tx_nr_rings + bp->rx_nr_rings;
8449
8450         } else {
8451                 rc = -ENOMEM;
8452                 goto msix_setup_exit;
8453         }
8454         bp->flags |= BNXT_FLAG_USING_MSIX;
8455         kfree(msix_ent);
8456         return 0;
8457
8458 msix_setup_exit:
8459         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8460         kfree(bp->irq_tbl);
8461         bp->irq_tbl = NULL;
8462         pci_disable_msix(bp->pdev);
8463         kfree(msix_ent);
8464         return rc;
8465 }
8466
8467 static int bnxt_init_inta(struct bnxt *bp)
8468 {
8469         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8470         if (!bp->irq_tbl)
8471                 return -ENOMEM;
8472
8473         bp->total_irqs = 1;
8474         bp->rx_nr_rings = 1;
8475         bp->tx_nr_rings = 1;
8476         bp->cp_nr_rings = 1;
8477         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8478         bp->irq_tbl[0].vector = bp->pdev->irq;
8479         return 0;
8480 }
8481
8482 static int bnxt_init_int_mode(struct bnxt *bp)
8483 {
8484         int rc = 0;
8485
8486         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8487                 rc = bnxt_init_msix(bp);
8488
8489         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8490                 /* fallback to INTA */
8491                 rc = bnxt_init_inta(bp);
8492         }
8493         return rc;
8494 }
8495
8496 static void bnxt_clear_int_mode(struct bnxt *bp)
8497 {
8498         if (bp->flags & BNXT_FLAG_USING_MSIX)
8499                 pci_disable_msix(bp->pdev);
8500
8501         kfree(bp->irq_tbl);
8502         bp->irq_tbl = NULL;
8503         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8504 }
8505
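/* Reserve rings with firmware.  If the required MSI-X vector count has
 * changed, the interrupt mode is torn down and reinitialized, with the
 * ULP driver's IRQs stopped and restarted around the change.
 */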
8506 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8507 {
8508         int tcs = netdev_get_num_tc(bp->dev);
8509         bool irq_cleared = false;
8510         int rc;
8511
8512         if (!bnxt_need_reserve_rings(bp))
8513                 return 0;
8514
8515         if (irq_re_init && BNXT_NEW_RM(bp) &&
8516             bnxt_get_num_msix(bp) != bp->total_irqs) {
8517                 bnxt_ulp_irq_stop(bp);
8518                 bnxt_clear_int_mode(bp);
8519                 irq_cleared = true;
8520         }
8521         rc = __bnxt_reserve_rings(bp);
8522         if (irq_cleared) {
8523                 if (!rc)
8524                         rc = bnxt_init_int_mode(bp);
8525                 bnxt_ulp_irq_restart(bp, rc);
8526         }
8527         if (rc) {
8528                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8529                 return rc;
8530         }
8531         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8532                 netdev_err(bp->dev, "tx ring reservation failure\n");
8533                 netdev_reset_tc(bp->dev);
8534                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8535                 return -ENOMEM;
8536         }
8537         return 0;
8538 }
8539
8540 static void bnxt_free_irq(struct bnxt *bp)
8541 {
8542         struct bnxt_irq *irq;
8543         int i;
8544
8545 #ifdef CONFIG_RFS_ACCEL
8546         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8547         bp->dev->rx_cpu_rmap = NULL;
8548 #endif
8549         if (!bp->irq_tbl || !bp->bnapi)
8550                 return;
8551
8552         for (i = 0; i < bp->cp_nr_rings; i++) {
8553                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8554
8555                 irq = &bp->irq_tbl[map_idx];
8556                 if (irq->requested) {
8557                         if (irq->have_cpumask) {
8558                                 irq_set_affinity_hint(irq->vector, NULL);
8559                                 free_cpumask_var(irq->cpu_mask);
8560                                 irq->have_cpumask = 0;
8561                         }
8562                         free_irq(irq->vector, bp->bnapi[i]);
8563                 }
8564
8565                 irq->requested = 0;
8566         }
8567 }
8568
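/* Set up the interrupt mode, request one IRQ per completion ring, register
 * RX rings with the RFS CPU rmap, and set CPU affinity hints for each
 * vector.
 */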
8569 static int bnxt_request_irq(struct bnxt *bp)
8570 {
8571         int i, j, rc = 0;
8572         unsigned long flags = 0;
8573 #ifdef CONFIG_RFS_ACCEL
8574         struct cpu_rmap *rmap;
8575 #endif
8576
8577         rc = bnxt_setup_int_mode(bp);
8578         if (rc) {
8579                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8580                            rc);
8581                 return rc;
8582         }
8583 #ifdef CONFIG_RFS_ACCEL
8584         rmap = bp->dev->rx_cpu_rmap;
8585 #endif
8586         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8587                 flags = IRQF_SHARED;
8588
8589         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8590                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8591                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8592
8593 #ifdef CONFIG_RFS_ACCEL
8594                 if (rmap && bp->bnapi[i]->rx_ring) {
8595                         rc = irq_cpu_rmap_add(rmap, irq->vector);
8596                         if (rc)
8597                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8598                                             j);
8599                         j++;
8600                 }
8601 #endif
8602                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8603                                  bp->bnapi[i]);
8604                 if (rc)
8605                         break;
8606
8607                 irq->requested = 1;
8608
8609                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8610                         int numa_node = dev_to_node(&bp->pdev->dev);
8611
8612                         irq->have_cpumask = 1;
8613                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8614                                         irq->cpu_mask);
8615                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8616                         if (rc) {
8617                                 netdev_warn(bp->dev,
8618                                             "Set affinity failed, IRQ = %d\n",
8619                                             irq->vector);
8620                                 break;
8621                         }
8622                 }
8623         }
8624         return rc;
8625 }
8626
8627 static void bnxt_del_napi(struct bnxt *bp)
8628 {
8629         int i;
8630
8631         if (!bp->bnapi)
8632                 return;
8633
8634         for (i = 0; i < bp->cp_nr_rings; i++) {
8635                 struct bnxt_napi *bnapi = bp->bnapi[i];
8636
8637                 napi_hash_del(&bnapi->napi);
8638                 netif_napi_del(&bnapi->napi);
8639         }
8640         /* We called napi_hash_del() before netif_napi_del(), so we need
8641          * to respect an RCU grace period before freeing the napi structures.
8642          */
8643         synchronize_net();
8644 }
8645
8646 static void bnxt_init_napi(struct bnxt *bp)
8647 {
8648         int i;
8649         unsigned int cp_nr_rings = bp->cp_nr_rings;
8650         struct bnxt_napi *bnapi;
8651
8652         if (bp->flags & BNXT_FLAG_USING_MSIX) {
8653                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8654
8655                 if (bp->flags & BNXT_FLAG_CHIP_P5)
8656                         poll_fn = bnxt_poll_p5;
8657                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8658                         cp_nr_rings--;
8659                 for (i = 0; i < cp_nr_rings; i++) {
8660                         bnapi = bp->bnapi[i];
8661                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8662                 }
8663                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8664                         bnapi = bp->bnapi[cp_nr_rings];
8665                         netif_napi_add(bp->dev, &bnapi->napi,
8666                                        bnxt_poll_nitroa0, 64);
8667                 }
8668         } else {
8669                 bnapi = bp->bnapi[0];
8670                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8671         }
8672 }
8673
8674 static void bnxt_disable_napi(struct bnxt *bp)
8675 {
8676         int i;
8677
8678         if (!bp->bnapi)
8679                 return;
8680
8681         for (i = 0; i < bp->cp_nr_rings; i++) {
8682                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8683
8684                 if (bp->bnapi[i]->rx_ring)
8685                         cancel_work_sync(&cpr->dim.work);
8686
8687                 napi_disable(&bp->bnapi[i]->napi);
8688         }
8689 }
8690
8691 static void bnxt_enable_napi(struct bnxt *bp)
8692 {
8693         int i;
8694
8695         for (i = 0; i < bp->cp_nr_rings; i++) {
8696                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8697                 bp->bnapi[i]->in_reset = false;
8698
8699                 if (bp->bnapi[i]->rx_ring) {
8700                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8701                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8702                 }
8703                 napi_enable(&bp->bnapi[i]->napi);
8704         }
8705 }
8706
8707 void bnxt_tx_disable(struct bnxt *bp)
8708 {
8709         int i;
8710         struct bnxt_tx_ring_info *txr;
8711
8712         if (bp->tx_ring) {
8713                 for (i = 0; i < bp->tx_nr_rings; i++) {
8714                         txr = &bp->tx_ring[i];
8715                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
8716                 }
8717         }
8718         /* Stop all TX queues */
8719         netif_tx_disable(bp->dev);
8720         netif_carrier_off(bp->dev);
8721 }
8722
8723 void bnxt_tx_enable(struct bnxt *bp)
8724 {
8725         int i;
8726         struct bnxt_tx_ring_info *txr;
8727
8728         for (i = 0; i < bp->tx_nr_rings; i++) {
8729                 txr = &bp->tx_ring[i];
8730                 txr->dev_state = 0;
8731         }
8732         netif_tx_wake_all_queues(bp->dev);
8733         if (bp->link_info.link_up)
8734                 netif_carrier_on(bp->dev);
8735 }
8736
8737 static void bnxt_report_link(struct bnxt *bp)
8738 {
8739         if (bp->link_info.link_up) {
8740                 const char *duplex;
8741                 const char *flow_ctrl;
8742                 u32 speed;
8743                 u16 fec;
8744
8745                 netif_carrier_on(bp->dev);
8746                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8747                         duplex = "full";
8748                 else
8749                         duplex = "half";
8750                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8751                         flow_ctrl = "ON - receive & transmit";
8752                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8753                         flow_ctrl = "ON - transmit";
8754                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8755                         flow_ctrl = "ON - receive";
8756                 else
8757                         flow_ctrl = "none";
8758                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8759                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8760                             speed, duplex, flow_ctrl);
8761                 if (bp->flags & BNXT_FLAG_EEE_CAP)
8762                         netdev_info(bp->dev, "EEE is %s\n",
8763                                     bp->eee.eee_active ? "active" :
8764                                                          "not active");
8765                 fec = bp->link_info.fec_cfg;
8766                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8767                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8768                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8769                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8770                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8771         } else {
8772                 netif_carrier_off(bp->dev);
8773                 netdev_err(bp->dev, "NIC Link is Down\n");
8774         }
8775 }
8776
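/* Query PHY capabilities from firmware: EEE support, loopback test
 * capabilities, shared port configuration, supported autoneg speeds, and
 * the port count.
 */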
8777 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8778 {
8779         int rc = 0;
8780         struct hwrm_port_phy_qcaps_input req = {0};
8781         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8782         struct bnxt_link_info *link_info = &bp->link_info;
8783
8784         bp->flags &= ~BNXT_FLAG_EEE_CAP;
8785         if (bp->test_info)
8786                 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
8787                                           BNXT_TEST_FL_AN_PHY_LPBK);
8788         if (bp->hwrm_spec_code < 0x10201)
8789                 return 0;
8790
8791         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8792
8793         mutex_lock(&bp->hwrm_cmd_lock);
8794         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8795         if (rc)
8796                 goto hwrm_phy_qcaps_exit;
8797
8798         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8799                 struct ethtool_eee *eee = &bp->eee;
8800                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8801
8802                 bp->flags |= BNXT_FLAG_EEE_CAP;
8803                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8804                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8805                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8806                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8807                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8808         }
8809         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8810                 if (bp->test_info)
8811                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8812         }
8813         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
8814                 if (bp->test_info)
8815                         bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
8816         }
8817         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
8818                 if (BNXT_PF(bp))
8819                         bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
8820         }
8821         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
8822                 bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
8823
8824         if (resp->supported_speeds_auto_mode)
8825                 link_info->support_auto_speeds =
8826                         le16_to_cpu(resp->supported_speeds_auto_mode);
8827
8828         bp->port_count = resp->port_cnt;
8829
8830 hwrm_phy_qcaps_exit:
8831         mutex_unlock(&bp->hwrm_cmd_lock);
8832         return rc;
8833 }
8834
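/* Query the current PHY state from firmware and refresh the cached
 * link_info.  When @chng_link_state is set, report link up/down changes;
 * advertised speeds that are no longer supported are also pruned.
 */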
8835 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8836 {
8837         int rc = 0;
8838         struct bnxt_link_info *link_info = &bp->link_info;
8839         struct hwrm_port_phy_qcfg_input req = {0};
8840         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8841         u8 link_up = link_info->link_up;
8842         u16 diff;
8843
8844         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8845
8846         mutex_lock(&bp->hwrm_cmd_lock);
8847         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8848         if (rc) {
8849                 mutex_unlock(&bp->hwrm_cmd_lock);
8850                 return rc;
8851         }
8852
8853         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8854         link_info->phy_link_status = resp->link;
8855         link_info->duplex = resp->duplex_cfg;
8856         if (bp->hwrm_spec_code >= 0x10800)
8857                 link_info->duplex = resp->duplex_state;
8858         link_info->pause = resp->pause;
8859         link_info->auto_mode = resp->auto_mode;
8860         link_info->auto_pause_setting = resp->auto_pause;
8861         link_info->lp_pause = resp->link_partner_adv_pause;
8862         link_info->force_pause_setting = resp->force_pause;
8863         link_info->duplex_setting = resp->duplex_cfg;
8864         if (link_info->phy_link_status == BNXT_LINK_LINK)
8865                 link_info->link_speed = le16_to_cpu(resp->link_speed);
8866         else
8867                 link_info->link_speed = 0;
8868         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8869         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8870         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8871         link_info->lp_auto_link_speeds =
8872                 le16_to_cpu(resp->link_partner_adv_speeds);
8873         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8874         link_info->phy_ver[0] = resp->phy_maj;
8875         link_info->phy_ver[1] = resp->phy_min;
8876         link_info->phy_ver[2] = resp->phy_bld;
8877         link_info->media_type = resp->media_type;
8878         link_info->phy_type = resp->phy_type;
8879         link_info->transceiver = resp->xcvr_pkg_type;
8880         link_info->phy_addr = resp->eee_config_phy_addr &
8881                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8882         link_info->module_status = resp->module_status;
8883
8884         if (bp->flags & BNXT_FLAG_EEE_CAP) {
8885                 struct ethtool_eee *eee = &bp->eee;
8886                 u16 fw_speeds;
8887
8888                 eee->eee_active = 0;
8889                 if (resp->eee_config_phy_addr &
8890                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8891                         eee->eee_active = 1;
8892                         fw_speeds = le16_to_cpu(
8893                                 resp->link_partner_adv_eee_link_speed_mask);
8894                         eee->lp_advertised =
8895                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8896                 }
8897
8898                 /* Pull initial EEE config */
8899                 if (!chng_link_state) {
8900                         if (resp->eee_config_phy_addr &
8901                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8902                                 eee->eee_enabled = 1;
8903
8904                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8905                         eee->advertised =
8906                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8907
8908                         if (resp->eee_config_phy_addr &
8909                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8910                                 __le32 tmr;
8911
8912                                 eee->tx_lpi_enabled = 1;
8913                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8914                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8915                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8916                         }
8917                 }
8918         }
8919
8920         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8921         if (bp->hwrm_spec_code >= 0x10504)
8922                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8923
8924         /* TODO: need to add more logic to report VF link */
8925         if (chng_link_state) {
8926                 if (link_info->phy_link_status == BNXT_LINK_LINK)
8927                         link_info->link_up = 1;
8928                 else
8929                         link_info->link_up = 0;
8930                 if (link_up != link_info->link_up)
8931                         bnxt_report_link(bp);
8932         } else {
8933                 /* always link down if not required to update link state */
8934                 link_info->link_up = 0;
8935         }
8936         mutex_unlock(&bp->hwrm_cmd_lock);
8937
8938         if (!BNXT_PHY_CFG_ABLE(bp))
8939                 return 0;
8940
8941         diff = link_info->support_auto_speeds ^ link_info->advertising;
8942         if ((link_info->support_auto_speeds | diff) !=
8943             link_info->support_auto_speeds) {
8944                 /* An advertised speed is no longer supported, so we need to
8945                  * update the advertisement settings.  Caller holds RTNL
8946                  * so we can modify link settings.
8947                  */
8948                 link_info->advertising = link_info->support_auto_speeds;
8949                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8950                         bnxt_hwrm_set_link_setting(bp, true, false);
8951         }
8952         return 0;
8953 }
8954
8955 static void bnxt_get_port_module_status(struct bnxt *bp)
8956 {
8957         struct bnxt_link_info *link_info = &bp->link_info;
8958         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8959         u8 module_status;
8960
8961         if (bnxt_update_link(bp, true))
8962                 return;
8963
8964         module_status = link_info->module_status;
8965         switch (module_status) {
8966         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8967         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8968         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8969                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8970                             bp->pf.port_id);
8971                 if (bp->hwrm_spec_code >= 0x10201) {
8972                         netdev_warn(bp->dev, "Module part number %s\n",
8973                                     resp->phy_vendor_partnumber);
8974                 }
8975                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8976                         netdev_warn(bp->dev, "TX is disabled\n");
8977                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8978                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8979         }
8980 }
8981
8982 static void
8983 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8984 {
8985         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8986                 if (bp->hwrm_spec_code >= 0x10201)
8987                         req->auto_pause =
8988                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8989                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8990                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8991                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8992                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8993                 req->enables |=
8994                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8995         } else {
8996                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8997                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8998                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8999                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9000                 req->enables |=
9001                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9002                 if (bp->hwrm_spec_code >= 0x10201) {
9003                         req->auto_pause = req->force_pause;
9004                         req->enables |= cpu_to_le32(
9005                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9006                 }
9007         }
9008 }
9009
9010 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
9011                                       struct hwrm_port_phy_cfg_input *req)
9012 {
9013         u8 autoneg = bp->link_info.autoneg;
9014         u16 fw_link_speed = bp->link_info.req_link_speed;
9015         u16 advertising = bp->link_info.advertising;
9016
9017         if (autoneg & BNXT_AUTONEG_SPEED) {
9018                 req->auto_mode |=
9019                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9020
9021                 req->enables |= cpu_to_le32(
9022                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9023                 req->auto_link_speed_mask = cpu_to_le16(advertising);
9024
9025                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9026                 req->flags |=
9027                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9028         } else {
9029                 req->force_link_speed = cpu_to_le16(fw_link_speed);
9030                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9031         }
9032
9033         /* tell chimp that the setting takes effect immediately */
9034         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9035 }
9036
9037 int bnxt_hwrm_set_pause(struct bnxt *bp)
9038 {
9039         struct hwrm_port_phy_cfg_input req = {0};
9040         int rc;
9041
9042         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9043         bnxt_hwrm_set_pause_common(bp, &req);
9044
9045         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9046             bp->link_info.force_link_chng)
9047                 bnxt_hwrm_set_link_common(bp, &req);
9048
9049         mutex_lock(&bp->hwrm_cmd_lock);
9050         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9051         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9052                 /* Since changing the pause setting doesn't trigger any link
9053                  * change event, the driver needs to update the current pause
9054                  * result upon a successful return of the phy_cfg command.
9055                  */
9056                 bp->link_info.pause =
9057                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9058                 bp->link_info.auto_pause_setting = 0;
9059                 if (!bp->link_info.force_link_chng)
9060                         bnxt_report_link(bp);
9061         }
9062         bp->link_info.force_link_chng = false;
9063         mutex_unlock(&bp->hwrm_cmd_lock);
9064         return rc;
9065 }
9066
9067 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9068                               struct hwrm_port_phy_cfg_input *req)
9069 {
9070         struct ethtool_eee *eee = &bp->eee;
9071
9072         if (eee->eee_enabled) {
9073                 u16 eee_speeds;
9074                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9075
9076                 if (eee->tx_lpi_enabled)
9077                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9078                 else
9079                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9080
9081                 req->flags |= cpu_to_le32(flags);
9082                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9083                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9084                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9085         } else {
9086                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9087         }
9088 }
9089
9090 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9091 {
9092         struct hwrm_port_phy_cfg_input req = {0};
9093
9094         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9095         if (set_pause)
9096                 bnxt_hwrm_set_pause_common(bp, &req);
9097
9098         bnxt_hwrm_set_link_common(bp, &req);
9099
9100         if (set_eee)
9101                 bnxt_hwrm_set_eee(bp, &req);
9102         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9103 }
9104
9105 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9106 {
9107         struct hwrm_port_phy_cfg_input req = {0};
9108
9109         if (!BNXT_SINGLE_PF(bp))
9110                 return 0;
9111
9112         if (pci_num_vf(bp->pdev))
9113                 return 0;
9114
9115         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9116         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9117         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9118 }
9119
9120 static int bnxt_fw_init_one(struct bnxt *bp);
9121
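/* Notify firmware that the interface is going up or down.  On the way up,
 * detect whether firmware was reset or resources changed while the
 * interface was down and reinitialize accordingly.
 */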
9122 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9123 {
9124         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9125         struct hwrm_func_drv_if_change_input req = {0};
9126         bool resc_reinit = false, fw_reset = false;
9127         u32 flags = 0;
9128         int rc;
9129
9130         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9131                 return 0;
9132
9133         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9134         if (up)
9135                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9136         mutex_lock(&bp->hwrm_cmd_lock);
9137         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9138         if (!rc)
9139                 flags = le32_to_cpu(resp->flags);
9140         mutex_unlock(&bp->hwrm_cmd_lock);
9141         if (rc)
9142                 return rc;
9143
9144         if (!up)
9145                 return 0;
9146
9147         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9148                 resc_reinit = true;
9149         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9150                 fw_reset = true;
9151
9152         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9153                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9154                 return -ENODEV;
9155         }
9156         if (resc_reinit || fw_reset) {
9157                 if (fw_reset) {
9158                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9159                                 bnxt_ulp_stop(bp);
9160                         bnxt_free_ctx_mem(bp);
9161                         kfree(bp->ctx);
9162                         bp->ctx = NULL;
9163                         bnxt_dcb_free(bp);
9164                         rc = bnxt_fw_init_one(bp);
9165                         if (rc) {
9166                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9167                                 return rc;
9168                         }
9169                         bnxt_clear_int_mode(bp);
9170                         rc = bnxt_init_int_mode(bp);
9171                         if (rc) {
9172                                 netdev_err(bp->dev, "init int mode failed\n");
9173                                 return rc;
9174                         }
9175                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9176                 }
9177                 if (BNXT_NEW_RM(bp)) {
9178                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9179
9180                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9181                         hw_resc->resv_cp_rings = 0;
9182                         hw_resc->resv_stat_ctxs = 0;
9183                         hw_resc->resv_irqs = 0;
9184                         hw_resc->resv_tx_rings = 0;
9185                         hw_resc->resv_rx_rings = 0;
9186                         hw_resc->resv_hw_ring_grps = 0;
9187                         hw_resc->resv_vnics = 0;
9188                         if (!fw_reset) {
9189                                 bp->tx_nr_rings = 0;
9190                                 bp->rx_nr_rings = 0;
9191                         }
9192                 }
9193         }
9194         return 0;
9195 }
9196
9197 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9198 {
9199         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9200         struct hwrm_port_led_qcaps_input req = {0};
9201         struct bnxt_pf_info *pf = &bp->pf;
9202         int rc;
9203
9204         bp->num_leds = 0;
9205         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9206                 return 0;
9207
9208         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9209         req.port_id = cpu_to_le16(pf->port_id);
9210         mutex_lock(&bp->hwrm_cmd_lock);
9211         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9212         if (rc) {
9213                 mutex_unlock(&bp->hwrm_cmd_lock);
9214                 return rc;
9215         }
9216         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9217                 int i;
9218
9219                 bp->num_leds = resp->num_leds;
9220                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9221                                                  bp->num_leds);
9222                 for (i = 0; i < bp->num_leds; i++) {
9223                         struct bnxt_led_info *led = &bp->leds[i];
9224                         __le16 caps = led->led_state_caps;
9225
9226                         if (!led->led_group_id ||
9227                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9228                                 bp->num_leds = 0;
9229                                 break;
9230                         }
9231                 }
9232         }
9233         mutex_unlock(&bp->hwrm_cmd_lock);
9234         return 0;
9235 }
9236
9237 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9238 {
9239         struct hwrm_wol_filter_alloc_input req = {0};
9240         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9241         int rc;
9242
9243         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9244         req.port_id = cpu_to_le16(bp->pf.port_id);
9245         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9246         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9247         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9248         mutex_lock(&bp->hwrm_cmd_lock);
9249         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9250         if (!rc)
9251                 bp->wol_filter_id = resp->wol_filter_id;
9252         mutex_unlock(&bp->hwrm_cmd_lock);
9253         return rc;
9254 }
9255
9256 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9257 {
9258         struct hwrm_wol_filter_free_input req = {0};
9259
9260         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9261         req.port_id = cpu_to_le16(bp->pf.port_id);
9262         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9263         req.wol_filter_id = bp->wol_filter_id;
9264         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9265 }
9266
9267 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9268 {
9269         struct hwrm_wol_filter_qcfg_input req = {0};
9270         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9271         u16 next_handle = 0;
9272         int rc;
9273
9274         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9275         req.port_id = cpu_to_le16(bp->pf.port_id);
9276         req.handle = cpu_to_le16(handle);
9277         mutex_lock(&bp->hwrm_cmd_lock);
9278         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9279         if (!rc) {
9280                 next_handle = le16_to_cpu(resp->next_handle);
9281                 if (next_handle != 0) {
9282                         if (resp->wol_type ==
9283                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9284                                 bp->wol = 1;
9285                                 bp->wol_filter_id = resp->wol_filter_id;
9286                         }
9287                 }
9288         }
9289         mutex_unlock(&bp->hwrm_cmd_lock);
9290         return next_handle;
9291 }
9292
9293 static void bnxt_get_wol_settings(struct bnxt *bp)
9294 {
9295         u16 handle = 0;
9296
9297         bp->wol = 0;
9298         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9299                 return;
9300
9301         do {
9302                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9303         } while (handle && handle != 0xffff);
9304 }
9305
9306 #ifdef CONFIG_BNXT_HWMON
9307 static ssize_t bnxt_show_temp(struct device *dev,
9308                               struct device_attribute *devattr, char *buf)
9309 {
9310         struct hwrm_temp_monitor_query_input req = {0};
9311         struct hwrm_temp_monitor_query_output *resp;
9312         struct bnxt *bp = dev_get_drvdata(dev);
9313         u32 len = 0;
9314
9315         resp = bp->hwrm_cmd_resp_addr;
9316         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9317         mutex_lock(&bp->hwrm_cmd_lock);
9318         if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
9319                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9320         mutex_unlock(&bp->hwrm_cmd_lock);
9321
9322         if (len)
9323                 return len;
9324
9325         return sprintf(buf, "unknown\n");
9326 }
9327 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9328
9329 static struct attribute *bnxt_attrs[] = {
9330         &sensor_dev_attr_temp1_input.dev_attr.attr,
9331         NULL
9332 };
9333 ATTRIBUTE_GROUPS(bnxt);
9334
9335 static void bnxt_hwmon_close(struct bnxt *bp)
9336 {
9337         if (bp->hwmon_dev) {
9338                 hwmon_device_unregister(bp->hwmon_dev);
9339                 bp->hwmon_dev = NULL;
9340         }
9341 }
9342
9343 static void bnxt_hwmon_open(struct bnxt *bp)
9344 {
9345         struct pci_dev *pdev = bp->pdev;
9346
9347         if (bp->hwmon_dev)
9348                 return;
9349
9350         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9351                                                           DRV_MODULE_NAME, bp,
9352                                                           bnxt_groups);
9353         if (IS_ERR(bp->hwmon_dev)) {
9354                 bp->hwmon_dev = NULL;
9355                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9356         }
9357 }
9358 #else
9359 static void bnxt_hwmon_close(struct bnxt *bp)
9360 {
9361 }
9362
9363 static void bnxt_hwmon_open(struct bnxt *bp)
9364 {
9365 }
9366 #endif
9367
9368 static bool bnxt_eee_config_ok(struct bnxt *bp)
9369 {
9370         struct ethtool_eee *eee = &bp->eee;
9371         struct bnxt_link_info *link_info = &bp->link_info;
9372
9373         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9374                 return true;
9375
9376         if (eee->eee_enabled) {
9377                 u32 advertising =
9378                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9379
9380                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9381                         eee->eee_enabled = 0;
9382                         return false;
9383                 }
9384                 if (eee->advertised & ~advertising) {
9385                         eee->advertised = advertising & eee->supported;
9386                         return false;
9387                 }
9388         }
9389         return true;
9390 }
9391
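/* Refresh the link state and, for a single-function PF, reapply any pause,
 * speed, or EEE settings that no longer match the requested configuration.
 */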
9392 static int bnxt_update_phy_setting(struct bnxt *bp)
9393 {
9394         int rc;
9395         bool update_link = false;
9396         bool update_pause = false;
9397         bool update_eee = false;
9398         struct bnxt_link_info *link_info = &bp->link_info;
9399
9400         rc = bnxt_update_link(bp, true);
9401         if (rc) {
9402                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9403                            rc);
9404                 return rc;
9405         }
9406         if (!BNXT_SINGLE_PF(bp))
9407                 return 0;
9408
9409         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9410             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9411             link_info->req_flow_ctrl)
9412                 update_pause = true;
9413         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9414             link_info->force_pause_setting != link_info->req_flow_ctrl)
9415                 update_pause = true;
9416         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9417                 if (BNXT_AUTO_MODE(link_info->auto_mode))
9418                         update_link = true;
9419                 if (link_info->req_link_speed != link_info->force_link_speed)
9420                         update_link = true;
9421                 if (link_info->req_duplex != link_info->duplex_setting)
9422                         update_link = true;
9423         } else {
9424                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9425                         update_link = true;
9426                 if (link_info->advertising != link_info->auto_link_speeds)
9427                         update_link = true;
9428         }
9429
9430         /* The last close may have shut down the link, so we need to call
9431          * PHY_CFG to bring it back up.
9432          */
9433         if (!bp->link_info.link_up)
9434                 update_link = true;
9435
9436         if (!bnxt_eee_config_ok(bp))
9437                 update_eee = true;
9438
9439         if (update_link)
9440                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9441         else if (update_pause)
9442                 rc = bnxt_hwrm_set_pause(bp);
9443         if (rc) {
9444                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9445                            rc);
9446                 return rc;
9447         }
9448
9449         return rc;
9450 }
9451
9452 /* Common routine to pre-map certain register blocks to different GRC windows.
9453  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9454  * in the PF and 3 windows in the VF can be customized to map different
9455  * register blocks.
9456  */
9457 static void bnxt_preset_reg_win(struct bnxt *bp)
9458 {
9459         if (BNXT_PF(bp)) {
9460                 /* CAG registers map to GRC window #4 */
9461                 writel(BNXT_CAG_REG_BASE,
9462                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9463         }
9464 }
9465
9466 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9467
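/* Core open path: reserve rings, allocate driver memory, request IRQs,
 * initialize the rings and chip, enable NAPI and interrupts, start the TX
 * queues, and bring up the link.
 */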
9468 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9469 {
9470         int rc = 0;
9471
9472         bnxt_preset_reg_win(bp);
9473         netif_carrier_off(bp->dev);
9474         if (irq_re_init) {
9475                 /* Reserve rings now if none were reserved at driver probe. */
9476                 rc = bnxt_init_dflt_ring_mode(bp);
9477                 if (rc) {
9478                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9479                         return rc;
9480                 }
9481         }
9482         rc = bnxt_reserve_rings(bp, irq_re_init);
9483         if (rc)
9484                 return rc;
9485         if ((bp->flags & BNXT_FLAG_RFS) &&
9486             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9487                 /* disable RFS if falling back to INTA */
9488                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9489                 bp->flags &= ~BNXT_FLAG_RFS;
9490         }
9491
9492         rc = bnxt_alloc_mem(bp, irq_re_init);
9493         if (rc) {
9494                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9495                 goto open_err_free_mem;
9496         }
9497
9498         if (irq_re_init) {
9499                 bnxt_init_napi(bp);
9500                 rc = bnxt_request_irq(bp);
9501                 if (rc) {
9502                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9503                         goto open_err_irq;
9504                 }
9505         }
9506
9507         rc = bnxt_init_nic(bp, irq_re_init);
9508         if (rc) {
9509                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9510                 goto open_err_irq;
9511         }
9512
9513         bnxt_enable_napi(bp);
9514         bnxt_debug_dev_init(bp);
9515
9516         if (link_re_init) {
9517                 mutex_lock(&bp->link_lock);
9518                 rc = bnxt_update_phy_setting(bp);
9519                 mutex_unlock(&bp->link_lock);
9520                 if (rc) {
9521                         netdev_warn(bp->dev, "failed to update phy settings\n");
9522                         if (BNXT_SINGLE_PF(bp)) {
9523                                 bp->link_info.phy_retry = true;
9524                                 bp->link_info.phy_retry_expires =
9525                                         jiffies + 5 * HZ;
9526                         }
9527                 }
9528         }
9529
9530         if (irq_re_init)
9531                 udp_tunnel_nic_reset_ntf(bp->dev);
9532
9533         set_bit(BNXT_STATE_OPEN, &bp->state);
9534         bnxt_enable_int(bp);
9535         /* Enable TX queues */
9536         bnxt_tx_enable(bp);
9537         mod_timer(&bp->timer, jiffies + bp->current_interval);
9538         /* Poll link status and check for SFP+ module status */
9539         bnxt_get_port_module_status(bp);
9540
9541         /* VF-reps may need to be re-opened after the PF is re-opened */
9542         if (BNXT_PF(bp))
9543                 bnxt_vf_reps_open(bp);
9544         return 0;
9545
9546 open_err_irq:
9547         bnxt_del_napi(bp);
9548
9549 open_err_free_mem:
9550         bnxt_free_skbs(bp);
9551         bnxt_free_irq(bp);
9552         bnxt_free_mem(bp, true);
9553         return rc;
9554 }
9555
9556 /* rtnl_lock held */
9557 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9558 {
9559         int rc = 0;
9560
9561         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9562         if (rc) {
9563                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9564                 dev_close(bp->dev);
9565         }
9566         return rc;
9567 }
9568
9569 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9570  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
9571  * self-tests.
9572  */
9573 int bnxt_half_open_nic(struct bnxt *bp)
9574 {
9575         int rc = 0;
9576
9577         rc = bnxt_alloc_mem(bp, false);
9578         if (rc) {
9579                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9580                 goto half_open_err;
9581         }
9582         rc = bnxt_init_nic(bp, false);
9583         if (rc) {
9584                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9585                 goto half_open_err;
9586         }
9587         return 0;
9588
9589 half_open_err:
9590         bnxt_free_skbs(bp);
9591         bnxt_free_mem(bp, false);
9592         dev_close(bp->dev);
9593         return rc;
9594 }
9595
9596 /* rtnl_lock held, this call can only be made after a previous successful
9597  * call to bnxt_half_open_nic().
9598  */
9599 void bnxt_half_close_nic(struct bnxt *bp)
9600 {
9601         bnxt_hwrm_resource_free(bp, false, false);
9602         bnxt_free_skbs(bp);
9603         bnxt_free_mem(bp, false);
9604 }
9605
9606 static void bnxt_reenable_sriov(struct bnxt *bp)
9607 {
9608         if (BNXT_PF(bp)) {
9609                 struct bnxt_pf_info *pf = &bp->pf;
9610                 int n = pf->active_vfs;
9611
9612                 if (n)
9613                         bnxt_cfg_hw_sriov(bp, &n, true);
9614         }
9615 }
9616
9617 static int bnxt_open(struct net_device *dev)
9618 {
9619         struct bnxt *bp = netdev_priv(dev);
9620         int rc;
9621
9622         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9623                 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9624                 return -ENODEV;
9625         }
9626
9627         rc = bnxt_hwrm_if_change(bp, true);
9628         if (rc)
9629                 return rc;
9630         rc = __bnxt_open_nic(bp, true, true);
9631         if (rc) {
9632                 bnxt_hwrm_if_change(bp, false);
9633         } else {
9634                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9635                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9636                                 bnxt_ulp_start(bp, 0);
9637                                 bnxt_reenable_sriov(bp);
9638                         }
9639                 }
9640                 bnxt_hwmon_open(bp);
9641         }
9642
9643         return rc;
9644 }
9645
9646 static bool bnxt_drv_busy(struct bnxt *bp)
9647 {
9648         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9649                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9650 }
9651
9652 static void bnxt_get_ring_stats(struct bnxt *bp,
9653                                 struct rtnl_link_stats64 *stats);
9654
9655 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9656                              bool link_re_init)
9657 {
9658         /* Close the VF-reps before closing PF */
9659         if (BNXT_PF(bp))
9660                 bnxt_vf_reps_close(bp);
9661
9662         /* Change device state to avoid TX queue wake-ups */
9663         bnxt_tx_disable(bp);
9664
9665         clear_bit(BNXT_STATE_OPEN, &bp->state);
9666         smp_mb__after_atomic();
9667         while (bnxt_drv_busy(bp))
9668                 msleep(20);
9669
9670         /* Flush rings and disable interrupts */
9671         bnxt_shutdown_nic(bp, irq_re_init);
9672
9673         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9674
9675         bnxt_debug_dev_exit(bp);
9676         bnxt_disable_napi(bp);
9677         del_timer_sync(&bp->timer);
9678         bnxt_free_skbs(bp);
9679
9680         /* Save ring stats before shutdown */
9681         if (bp->bnapi && irq_re_init)
9682                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9683         if (irq_re_init) {
9684                 bnxt_free_irq(bp);
9685                 bnxt_del_napi(bp);
9686         }
9687         bnxt_free_mem(bp, irq_re_init);
9688 }
9689
9690 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9691 {
9692         int rc = 0;
9693
9694         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9695                 /* If we get here, it means firmware reset is in progress
9696                  * while we are trying to close.  We can safely proceed with
9697                  * the close because we are holding rtnl_lock().  Some firmware
9698                  * messages may fail as we proceed to close.  We set the
9699                  * ABORT_ERR flag here so that the FW reset thread will later
9700                  * abort when it gets the rtnl_lock() and sees the flag.
9701                  */
9702                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9703                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9704         }
9705
9706 #ifdef CONFIG_BNXT_SRIOV
9707         if (bp->sriov_cfg) {
9708                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9709                                                       !bp->sriov_cfg,
9710                                                       BNXT_SRIOV_CFG_WAIT_TMO);
9711                 if (rc)
9712                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9713         }
9714 #endif
9715         __bnxt_close_nic(bp, irq_re_init, link_re_init);
9716         return rc;
9717 }
9718
9719 static int bnxt_close(struct net_device *dev)
9720 {
9721         struct bnxt *bp = netdev_priv(dev);
9722
9723         bnxt_hwmon_close(bp);
9724         bnxt_close_nic(bp, true, true);
9725         bnxt_hwrm_shutdown_link(bp);
9726         bnxt_hwrm_if_change(bp, false);
9727         return 0;
9728 }
9729
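/* MDIO register read via the HWRM_PORT_PHY_MDIO_READ command.  Handles both
 * clause 22 and clause 45 (packed prtad/devad) PHY addressing and requires
 * HWRM spec 0x10a00 or newer.
 */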
9730 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9731                                    u16 *val)
9732 {
9733         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9734         struct hwrm_port_phy_mdio_read_input req = {0};
9735         int rc;
9736
9737         if (bp->hwrm_spec_code < 0x10a00)
9738                 return -EOPNOTSUPP;
9739
9740         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9741         req.port_id = cpu_to_le16(bp->pf.port_id);
9742         req.phy_addr = phy_addr;
9743         req.reg_addr = cpu_to_le16(reg & 0x1f);
9744         if (mdio_phy_id_is_c45(phy_addr)) {
9745                 req.cl45_mdio = 1;
9746                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9747                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9748                 req.reg_addr = cpu_to_le16(reg);
9749         }
9750
9751         mutex_lock(&bp->hwrm_cmd_lock);
9752         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9753         if (!rc)
9754                 *val = le16_to_cpu(resp->reg_data);
9755         mutex_unlock(&bp->hwrm_cmd_lock);
9756         return rc;
9757 }
9758
9759 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9760                                     u16 val)
9761 {
9762         struct hwrm_port_phy_mdio_write_input req = {0};
9763
9764         if (bp->hwrm_spec_code < 0x10a00)
9765                 return -EOPNOTSUPP;
9766
9767         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9768         req.port_id = cpu_to_le16(bp->pf.port_id);
9769         req.phy_addr = phy_addr;
9770         req.reg_addr = cpu_to_le16(reg & 0x1f);
9771         if (mdio_phy_id_is_c45(phy_addr)) {
9772                 req.cl45_mdio = 1;
9773                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9774                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9775                 req.reg_addr = cpu_to_le16(reg);
9776         }
9777         req.reg_data = cpu_to_le16(val);
9778
9779         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9780 }
9781
9782 /* rtnl_lock held */
9783 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9784 {
9785         struct mii_ioctl_data *mdio = if_mii(ifr);
9786         struct bnxt *bp = netdev_priv(dev);
9787         int rc;
9788
9789         switch (cmd) {
9790         case SIOCGMIIPHY:
9791                 mdio->phy_id = bp->link_info.phy_addr;
9792
9793                 fallthrough;
9794         case SIOCGMIIREG: {
9795                 u16 mii_regval = 0;
9796
9797                 if (!netif_running(dev))
9798                         return -EAGAIN;
9799
9800                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9801                                              &mii_regval);
9802                 mdio->val_out = mii_regval;
9803                 return rc;
9804         }
9805
9806         case SIOCSMIIREG:
9807                 if (!netif_running(dev))
9808                         return -EAGAIN;
9809
9810                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9811                                                 mdio->val_in);
9812
9813         default:
9814                 /* do nothing */
9815                 break;
9816         }
9817         return -EOPNOTSUPP;
9818 }
9819
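/* Accumulate the per-completion-ring software counters into the standard
 * rtnl_link_stats64 fields (packets, bytes, multicast, missed, dropped).
 */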
9820 static void bnxt_get_ring_stats(struct bnxt *bp,
9821                                 struct rtnl_link_stats64 *stats)
9822 {
9823         int i;
9824
9825         for (i = 0; i < bp->cp_nr_rings; i++) {
9826                 struct bnxt_napi *bnapi = bp->bnapi[i];
9827                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9828                 u64 *sw = cpr->stats.sw_stats;
9829
9830                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
9831                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
9832                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
9833
9834                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
9835                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
9836                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
9837
9838                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
9839                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
9840                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
9841
9842                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
9843                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
9844                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
9845
9846                 stats->rx_missed_errors +=
9847                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
9848
9849                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
9850
9851                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
9852         }
9853 }
9854
9855 static void bnxt_add_prev_stats(struct bnxt *bp,
9856                                 struct rtnl_link_stats64 *stats)
9857 {
9858         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9859
9860         stats->rx_packets += prev_stats->rx_packets;
9861         stats->tx_packets += prev_stats->tx_packets;
9862         stats->rx_bytes += prev_stats->rx_bytes;
9863         stats->tx_bytes += prev_stats->tx_bytes;
9864         stats->rx_missed_errors += prev_stats->rx_missed_errors;
9865         stats->multicast += prev_stats->multicast;
9866         stats->tx_dropped += prev_stats->tx_dropped;
9867 }
9868
9869 static void
9870 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9871 {
9872         struct bnxt *bp = netdev_priv(dev);
9873
9874         set_bit(BNXT_STATE_READ_STATS, &bp->state);
9875         /* Make sure bnxt_close_nic() sees that we are reading stats before
9876          * we check the BNXT_STATE_OPEN flag.
9877          */
9878         smp_mb__after_atomic();
9879         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9880                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9881                 *stats = bp->net_stats_prev;
9882                 return;
9883         }
9884
9885         bnxt_get_ring_stats(bp, stats);
9886         bnxt_add_prev_stats(bp, stats);
9887
9888         if (bp->flags & BNXT_FLAG_PORT_STATS) {
9889                 u64 *rx = bp->port_stats.sw_stats;
9890                 u64 *tx = bp->port_stats.sw_stats +
9891                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9892
9893                 stats->rx_crc_errors =
9894                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
9895                 stats->rx_frame_errors =
9896                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
9897                 stats->rx_length_errors =
9898                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
9899                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
9900                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
9901                 stats->rx_errors =
9902                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
9903                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
9904                 stats->collisions =
9905                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
9906                 stats->tx_fifo_errors =
9907                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
9908                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9909         }
9910         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9911 }
9912
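/* Sync the netdev multicast list into the default VNIC's mc_list and update
 * *rx_mask.  Returns true if the list changed.  If more than
 * BNXT_MAX_MC_ADDRS addresses are present, fall back to ALL_MCAST instead.
 */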
9913 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9914 {
9915         struct net_device *dev = bp->dev;
9916         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9917         struct netdev_hw_addr *ha;
9918         u8 *haddr;
9919         int mc_count = 0;
9920         bool update = false;
9921         int off = 0;
9922
9923         netdev_for_each_mc_addr(ha, dev) {
9924                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9925                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9926                         vnic->mc_list_count = 0;
9927                         return false;
9928                 }
9929                 haddr = ha->addr;
9930                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9931                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9932                         update = true;
9933                 }
9934                 off += ETH_ALEN;
9935                 mc_count++;
9936         }
9937         if (mc_count)
9938                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9939
9940         if (mc_count != vnic->mc_list_count) {
9941                 vnic->mc_list_count = mc_count;
9942                 update = true;
9943         }
9944         return update;
9945 }
9946
9947 static bool bnxt_uc_list_updated(struct bnxt *bp)
9948 {
9949         struct net_device *dev = bp->dev;
9950         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9951         struct netdev_hw_addr *ha;
9952         int off = 0;
9953
9954         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9955                 return true;
9956
9957         netdev_for_each_uc_addr(ha, dev) {
9958                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9959                         return true;
9960
9961                 off += ETH_ALEN;
9962         }
9963         return false;
9964 }
9965
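/* ndo_set_rx_mode handler.  Recomputes the VNIC RX mask from the netdev
 * flags and address lists; the actual HWRM configuration (which can sleep)
 * is deferred to bnxt_sp_task() via BNXT_RX_MASK_SP_EVENT.
 */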
9966 static void bnxt_set_rx_mode(struct net_device *dev)
9967 {
9968         struct bnxt *bp = netdev_priv(dev);
9969         struct bnxt_vnic_info *vnic;
9970         bool mc_update = false;
9971         bool uc_update;
9972         u32 mask;
9973
9974         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9975                 return;
9976
9977         vnic = &bp->vnic_info[0];
9978         mask = vnic->rx_mask;
9979         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9980                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9981                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9982                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9983
9984         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9985                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9986
9987         uc_update = bnxt_uc_list_updated(bp);
9988
9989         if (dev->flags & IFF_BROADCAST)
9990                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9991         if (dev->flags & IFF_ALLMULTI) {
9992                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9993                 vnic->mc_list_count = 0;
9994         } else {
9995                 mc_update = bnxt_mc_list_updated(bp, &mask);
9996         }
9997
9998         if (mask != vnic->rx_mask || uc_update || mc_update) {
9999                 vnic->rx_mask = mask;
10000
10001                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10002                 bnxt_queue_sp_work(bp);
10003         }
10004 }
10005
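/* Apply the RX mode computed in bnxt_set_rx_mode().  Frees the stale unicast
 * L2 filters, re-adds the current unicast addresses and then issues the
 * CFA_L2_SET_RX_MASK command, falling back to ALL_MCAST if the multicast
 * filter update fails.
 */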
10006 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10007 {
10008         struct net_device *dev = bp->dev;
10009         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10010         struct netdev_hw_addr *ha;
10011         int i, off = 0, rc;
10012         bool uc_update;
10013
10014         netif_addr_lock_bh(dev);
10015         uc_update = bnxt_uc_list_updated(bp);
10016         netif_addr_unlock_bh(dev);
10017
10018         if (!uc_update)
10019                 goto skip_uc;
10020
10021         mutex_lock(&bp->hwrm_cmd_lock);
10022         for (i = 1; i < vnic->uc_filter_count; i++) {
10023                 struct hwrm_cfa_l2_filter_free_input req = {0};
10024
10025                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10026                                        -1);
10027
10028                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10029
10030                 rc = _hwrm_send_message(bp, &req, sizeof(req),
10031                                         HWRM_CMD_TIMEOUT);
10032         }
10033         mutex_unlock(&bp->hwrm_cmd_lock);
10034
10035         vnic->uc_filter_count = 1;
10036
10037         netif_addr_lock_bh(dev);
10038         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10039                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10040         } else {
10041                 netdev_for_each_uc_addr(ha, dev) {
10042                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10043                         off += ETH_ALEN;
10044                         vnic->uc_filter_count++;
10045                 }
10046         }
10047         netif_addr_unlock_bh(dev);
10048
10049         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10050                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10051                 if (rc) {
10052                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10053                                    rc);
10054                         vnic->uc_filter_count = i;
10055                         return rc;
10056                 }
10057         }
10058
10059 skip_uc:
10060         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10061         if (rc && vnic->mc_list_count) {
10062                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10063                             rc);
10064                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10065                 vnic->mc_list_count = 0;
10066                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10067         }
10068         if (rc)
10069                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10070                            rc);
10071
10072         return rc;
10073 }
10074
10075 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10076 {
10077 #ifdef CONFIG_BNXT_SRIOV
10078         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10079                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10080
10081                 /* No minimum rings were provisioned by the PF.  Don't
10082                  * reserve rings by default when device is down.
10083                  */
10084                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10085                         return true;
10086
10087                 if (!netif_running(bp->dev))
10088                         return false;
10089         }
10090 #endif
10091         return true;
10092 }
10093
10094 /* If the chip and firmware support RFS */
10095 static bool bnxt_rfs_supported(struct bnxt *bp)
10096 {
10097         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10098                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10099                         return true;
10100                 return false;
10101         }
10102         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10103                 return true;
10104         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10105                 return true;
10106         return false;
10107 }
10108
10109 /* If runtime conditions support RFS */
10110 static bool bnxt_rfs_capable(struct bnxt *bp)
10111 {
10112 #ifdef CONFIG_RFS_ACCEL
10113         int vnics, max_vnics, max_rss_ctxs;
10114
10115         if (bp->flags & BNXT_FLAG_CHIP_P5)
10116                 return bnxt_rfs_supported(bp);
10117         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10118                 return false;
10119
10120         vnics = 1 + bp->rx_nr_rings;
10121         max_vnics = bnxt_get_max_func_vnics(bp);
10122         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10123
10124         /* RSS contexts not a limiting factor */
10125         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10126                 max_rss_ctxs = max_vnics;
10127         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10128                 if (bp->rx_nr_rings > 1)
10129                         netdev_warn(bp->dev,
10130                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10131                                     min(max_rss_ctxs - 1, max_vnics - 1));
10132                 return false;
10133         }
10134
10135         if (!BNXT_NEW_RM(bp))
10136                 return true;
10137
10138         if (vnics == bp->hw_resc.resv_vnics)
10139                 return true;
10140
10141         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10142         if (vnics <= bp->hw_resc.resv_vnics)
10143                 return true;
10144
10145         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10146         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10147         return false;
10148 #else
10149         return false;
10150 #endif
10151 }
10152
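/* ndo_fix_features handler.  Drops NTUPLE when RFS cannot be supported,
 * keeps hardware GRO and LRO mutually exclusive, and forces the CTAG and
 * STAG RX VLAN acceleration bits to be toggled together.
 */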
10153 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10154                                            netdev_features_t features)
10155 {
10156         struct bnxt *bp = netdev_priv(dev);
10157         netdev_features_t vlan_features;
10158
10159         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10160                 features &= ~NETIF_F_NTUPLE;
10161
10162         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10163                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10164
10165         if (!(features & NETIF_F_GRO))
10166                 features &= ~NETIF_F_GRO_HW;
10167
10168         if (features & NETIF_F_GRO_HW)
10169                 features &= ~NETIF_F_LRO;
10170
10171         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10172          * turned on or off together.
10173          */
10174         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10175         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10176                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10177                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10178                 else if (vlan_features)
10179                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10180         }
10181 #ifdef CONFIG_BNXT_SRIOV
10182         if (BNXT_VF(bp) && bp->vf.vlan)
10183                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10184 #endif
10185         return features;
10186 }
10187
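/* ndo_set_features handler.  Translates the requested features into
 * BNXT_FLAG_* bits.  A TPA change that keeps aggregation enabled can be
 * applied in place with bnxt_set_tpa(); fully enabling or disabling TPA,
 * TPA changes on P5 chips, and all other flag changes require a close/open
 * cycle with the new flags.
 */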
10188 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10189 {
10190         struct bnxt *bp = netdev_priv(dev);
10191         u32 flags = bp->flags;
10192         u32 changes;
10193         int rc = 0;
10194         bool re_init = false;
10195         bool update_tpa = false;
10196
10197         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10198         if (features & NETIF_F_GRO_HW)
10199                 flags |= BNXT_FLAG_GRO;
10200         else if (features & NETIF_F_LRO)
10201                 flags |= BNXT_FLAG_LRO;
10202
10203         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10204                 flags &= ~BNXT_FLAG_TPA;
10205
10206         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10207                 flags |= BNXT_FLAG_STRIP_VLAN;
10208
10209         if (features & NETIF_F_NTUPLE)
10210                 flags |= BNXT_FLAG_RFS;
10211
10212         changes = flags ^ bp->flags;
10213         if (changes & BNXT_FLAG_TPA) {
10214                 update_tpa = true;
10215                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10216                     (flags & BNXT_FLAG_TPA) == 0 ||
10217                     (bp->flags & BNXT_FLAG_CHIP_P5))
10218                         re_init = true;
10219         }
10220
10221         if (changes & ~BNXT_FLAG_TPA)
10222                 re_init = true;
10223
10224         if (flags != bp->flags) {
10225                 u32 old_flags = bp->flags;
10226
10227                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10228                         bp->flags = flags;
10229                         if (update_tpa)
10230                                 bnxt_set_ring_params(bp);
10231                         return rc;
10232                 }
10233
10234                 if (re_init) {
10235                         bnxt_close_nic(bp, false, false);
10236                         bp->flags = flags;
10237                         if (update_tpa)
10238                                 bnxt_set_ring_params(bp);
10239
10240                         return bnxt_open_nic(bp, false, false);
10241                 }
10242                 if (update_tpa) {
10243                         bp->flags = flags;
10244                         rc = bnxt_set_tpa(bp,
10245                                           (flags & BNXT_FLAG_TPA) ?
10246                                           true : false);
10247                         if (rc)
10248                                 bp->flags = old_flags;
10249                 }
10250         }
10251         return rc;
10252 }
10253
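/* Read num_words 32-bit registers starting at reg_off using the
 * HWRM_DBG_READ_DIRECT command.  Firmware DMAs the values into a coherent
 * buffer, which is then converted from little endian and copied to reg_buf.
 */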
10254 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10255                          u32 *reg_buf)
10256 {
10257         struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10258         struct hwrm_dbg_read_direct_input req = {0};
10259         __le32 *dbg_reg_buf;
10260         dma_addr_t mapping;
10261         int rc, i;
10262
10263         dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10264                                          &mapping, GFP_KERNEL);
10265         if (!dbg_reg_buf)
10266                 return -ENOMEM;
10267         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10268         req.host_dest_addr = cpu_to_le64(mapping);
10269         req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10270         req.read_len32 = cpu_to_le32(num_words);
10271         mutex_lock(&bp->hwrm_cmd_lock);
10272         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10273         if (rc || resp->error_code) {
10274                 rc = -EIO;
10275                 goto dbg_rd_reg_exit;
10276         }
10277         for (i = 0; i < num_words; i++)
10278                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10279
10280 dbg_rd_reg_exit:
10281         mutex_unlock(&bp->hwrm_cmd_lock);
10282         dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10283         return rc;
10284 }
10285
10286 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10287                                        u32 ring_id, u32 *prod, u32 *cons)
10288 {
10289         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10290         struct hwrm_dbg_ring_info_get_input req = {0};
10291         int rc;
10292
10293         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10294         req.ring_type = ring_type;
10295         req.fw_ring_id = cpu_to_le32(ring_id);
10296         mutex_lock(&bp->hwrm_cmd_lock);
10297         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10298         if (!rc) {
10299                 *prod = le32_to_cpu(resp->producer_index);
10300                 *cons = le32_to_cpu(resp->consumer_index);
10301         }
10302         mutex_unlock(&bp->hwrm_cmd_lock);
10303         return rc;
10304 }
10305
10306 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10307 {
10308         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10309         int i = bnapi->index;
10310
10311         if (!txr)
10312                 return;
10313
10314         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10315                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10316                     txr->tx_cons);
10317 }
10318
10319 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10320 {
10321         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10322         int i = bnapi->index;
10323
10324         if (!rxr)
10325                 return;
10326
10327         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10328                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10329                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10330                     rxr->rx_sw_agg_prod);
10331 }
10332
10333 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10334 {
10335         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10336         int i = bnapi->index;
10337
10338         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10339                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10340 }
10341
10342 static void bnxt_dbg_dump_states(struct bnxt *bp)
10343 {
10344         int i;
10345         struct bnxt_napi *bnapi;
10346
10347         for (i = 0; i < bp->cp_nr_rings; i++) {
10348                 bnapi = bp->bnapi[i];
10349                 if (netif_msg_drv(bp)) {
10350                         bnxt_dump_tx_sw_state(bnapi);
10351                         bnxt_dump_rx_sw_state(bnapi);
10352                         bnxt_dump_cp_sw_state(bnapi);
10353                 }
10354         }
10355 }
10356
10357 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10358 {
10359         if (!silent)
10360                 bnxt_dbg_dump_states(bp);
10361         if (netif_running(bp->dev)) {
10362                 int rc;
10363
10364                 if (silent) {
10365                         bnxt_close_nic(bp, false, false);
10366                         bnxt_open_nic(bp, false, false);
10367                 } else {
10368                         bnxt_ulp_stop(bp);
10369                         bnxt_close_nic(bp, true, false);
10370                         rc = bnxt_open_nic(bp, true, false);
10371                         bnxt_ulp_start(bp, rc);
10372                 }
10373         }
10374 }
10375
10376 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
10377 {
10378         struct bnxt *bp = netdev_priv(dev);
10379
10380         netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
10381         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10382         bnxt_queue_sp_work(bp);
10383 }
10384
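/* Called from bnxt_timer() when error recovery is supported.  Once the
 * tmr_counter countdown expires, sample the firmware heartbeat and reset
 * counter registers; if the heartbeat has stalled or the reset count has
 * changed, schedule BNXT_FW_EXCEPTION_SP_EVENT to start recovery.
 */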
10385 static void bnxt_fw_health_check(struct bnxt *bp)
10386 {
10387         struct bnxt_fw_health *fw_health = bp->fw_health;
10388         u32 val;
10389
10390         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10391                 return;
10392
10393         if (fw_health->tmr_counter) {
10394                 fw_health->tmr_counter--;
10395                 return;
10396         }
10397
10398         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10399         if (val == fw_health->last_fw_heartbeat)
10400                 goto fw_reset;
10401
10402         fw_health->last_fw_heartbeat = val;
10403
10404         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10405         if (val != fw_health->last_fw_reset_cnt)
10406                 goto fw_reset;
10407
10408         fw_health->tmr_counter = fw_health->tmr_multiplier;
10409         return;
10410
10411 fw_reset:
10412         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10413         bnxt_queue_sp_work(bp);
10414 }
10415
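/* Periodic driver timer.  While the device is open, it runs the firmware
 * health check and schedules slow-path work for stats refresh, TC flower
 * stats, ntuple filter maintenance, PHY setting retries and the missed-IRQ
 * check, then re-arms itself at bp->current_interval.
 */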
10416 static void bnxt_timer(struct timer_list *t)
10417 {
10418         struct bnxt *bp = from_timer(bp, t, timer);
10419         struct net_device *dev = bp->dev;
10420
10421         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
10422                 return;
10423
10424         if (atomic_read(&bp->intr_sem) != 0)
10425                 goto bnxt_restart_timer;
10426
10427         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10428                 bnxt_fw_health_check(bp);
10429
10430         if (bp->link_info.link_up && bp->stats_coal_ticks) {
10431                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10432                 bnxt_queue_sp_work(bp);
10433         }
10434
10435         if (bnxt_tc_flower_enabled(bp)) {
10436                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10437                 bnxt_queue_sp_work(bp);
10438         }
10439
10440 #ifdef CONFIG_RFS_ACCEL
10441         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10442                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10443                 bnxt_queue_sp_work(bp);
10444         }
10445 #endif /*CONFIG_RFS_ACCEL*/
10446
10447         if (bp->link_info.phy_retry) {
10448                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10449                         bp->link_info.phy_retry = false;
10450                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10451                 } else {
10452                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10453                         bnxt_queue_sp_work(bp);
10454                 }
10455         }
10456
10457         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10458             netif_carrier_ok(dev)) {
10459                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10460                 bnxt_queue_sp_work(bp);
10461         }
10462 bnxt_restart_timer:
10463         mod_timer(&bp->timer, jiffies + bp->current_interval);
10464 }
10465
10466 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10467 {
10468         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10469          * set.  If the device is being closed, bnxt_close() may be holding
10470          * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
10471          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
10472          */
10473         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10474         rtnl_lock();
10475 }
10476
10477 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10478 {
10479         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10480         rtnl_unlock();
10481 }
10482
10483 /* Only called from bnxt_sp_task() */
10484 static void bnxt_reset(struct bnxt *bp, bool silent)
10485 {
10486         bnxt_rtnl_lock_sp(bp);
10487         if (test_bit(BNXT_STATE_OPEN, &bp->state))
10488                 bnxt_reset_task(bp, silent);
10489         bnxt_rtnl_unlock_sp(bp);
10490 }
10491
10492 static void bnxt_fw_reset_close(struct bnxt *bp)
10493 {
10494         bnxt_ulp_stop(bp);
10495         /* When firmware is in fatal state, disable the PCI device to prevent
10496          * any potential bad DMAs before freeing kernel memory.
10497          */
10498         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10499                 pci_disable_device(bp->pdev);
10500         __bnxt_close_nic(bp, true, false);
10501         bnxt_clear_int_mode(bp);
10502         bnxt_hwrm_func_drv_unrgtr(bp);
10503         if (pci_is_enabled(bp->pdev))
10504                 pci_disable_device(bp->pdev);
10505         bnxt_free_ctx_mem(bp);
10506         kfree(bp->ctx);
10507         bp->ctx = NULL;
10508 }
10509
10510 static bool is_bnxt_fw_ok(struct bnxt *bp)
10511 {
10512         struct bnxt_fw_health *fw_health = bp->fw_health;
10513         bool no_heartbeat = false, has_reset = false;
10514         u32 val;
10515
10516         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10517         if (val == fw_health->last_fw_heartbeat)
10518                 no_heartbeat = true;
10519
10520         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10521         if (val != fw_health->last_fw_reset_cnt)
10522                 has_reset = true;
10523
10524         if (!no_heartbeat && has_reset)
10525                 return true;
10526
10527         return false;
10528 }
10529
10530 /* rtnl_lock is acquired before calling this function */
10531 static void bnxt_force_fw_reset(struct bnxt *bp)
10532 {
10533         struct bnxt_fw_health *fw_health = bp->fw_health;
10534         u32 wait_dsecs;
10535
10536         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10537             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10538                 return;
10539
10540         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10541         bnxt_fw_reset_close(bp);
10542         wait_dsecs = fw_health->master_func_wait_dsecs;
10543         if (fw_health->master) {
10544                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10545                         wait_dsecs = 0;
10546                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10547         } else {
10548                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10549                 wait_dsecs = fw_health->normal_func_wait_dsecs;
10550                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10551         }
10552
10553         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10554         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10555         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10556 }
10557
10558 void bnxt_fw_exception(struct bnxt *bp)
10559 {
10560         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10561         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10562         bnxt_rtnl_lock_sp(bp);
10563         bnxt_force_fw_reset(bp);
10564         bnxt_rtnl_unlock_sp(bp);
10565 }
10566
10567 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10568  * < 0 on error.
10569  */
10570 static int bnxt_get_registered_vfs(struct bnxt *bp)
10571 {
10572 #ifdef CONFIG_BNXT_SRIOV
10573         int rc;
10574
10575         if (!BNXT_PF(bp))
10576                 return 0;
10577
10578         rc = bnxt_hwrm_func_qcfg(bp);
10579         if (rc) {
10580                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10581                 return rc;
10582         }
10583         if (bp->pf.registered_vfs)
10584                 return bp->pf.registered_vfs;
10585         if (bp->sriov_cfg)
10586                 return 1;
10587 #endif
10588         return 0;
10589 }
10590
10591 void bnxt_fw_reset(struct bnxt *bp)
10592 {
10593         bnxt_rtnl_lock_sp(bp);
10594         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10595             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10596                 int n = 0, tmo;
10597
10598                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10599                 if (bp->pf.active_vfs &&
10600                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10601                         n = bnxt_get_registered_vfs(bp);
10602                 if (n < 0) {
10603                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10604                                    n);
10605                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10606                         dev_close(bp->dev);
10607                         goto fw_reset_exit;
10608                 } else if (n > 0) {
10609                         u16 vf_tmo_dsecs = n * 10;
10610
10611                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10612                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10613                         bp->fw_reset_state =
10614                                 BNXT_FW_RESET_STATE_POLL_VF;
10615                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10616                         goto fw_reset_exit;
10617                 }
10618                 bnxt_fw_reset_close(bp);
10619                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10620                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10621                         tmo = HZ / 10;
10622                 } else {
10623                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10624                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10625                 }
10626                 bnxt_queue_fw_reset_work(bp, tmo);
10627         }
10628 fw_reset_exit:
10629         bnxt_rtnl_unlock_sp(bp);
10630 }
10631
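/* P5-only diagnostic run via BNXT_RING_COAL_NOW_SP_EVENT.  If a completion
 * ring has pending work but its consumer index has not advanced since the
 * last check, query the ring state from firmware and count a missed IRQ in
 * the ring's software stats.
 */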
10632 static void bnxt_chk_missed_irq(struct bnxt *bp)
10633 {
10634         int i;
10635
10636         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10637                 return;
10638
10639         for (i = 0; i < bp->cp_nr_rings; i++) {
10640                 struct bnxt_napi *bnapi = bp->bnapi[i];
10641                 struct bnxt_cp_ring_info *cpr;
10642                 u32 fw_ring_id;
10643                 int j;
10644
10645                 if (!bnapi)
10646                         continue;
10647
10648                 cpr = &bnapi->cp_ring;
10649                 for (j = 0; j < 2; j++) {
10650                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10651                         u32 val[2];
10652
10653                         if (!cpr2 || cpr2->has_more_work ||
10654                             !bnxt_has_work(bp, cpr2))
10655                                 continue;
10656
10657                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10658                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10659                                 continue;
10660                         }
10661                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10662                         bnxt_dbg_hwrm_ring_info_get(bp,
10663                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10664                                 fw_ring_id, &val[0], &val[1]);
10665                         cpr->sw_stats.cmn.missed_irqs++;
10666                 }
10667         }
10668 }
10669
10670 static void bnxt_cfg_ntp_filters(struct bnxt *);
10671
10672 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10673 {
10674         struct bnxt_link_info *link_info = &bp->link_info;
10675
10676         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10677                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10678                 if (bp->hwrm_spec_code >= 0x10201) {
10679                         if (link_info->auto_pause_setting &
10680                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10681                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10682                 } else {
10683                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10684                 }
10685                 link_info->advertising = link_info->auto_link_speeds;
10686         } else {
10687                 link_info->req_link_speed = link_info->force_link_speed;
10688                 link_info->req_duplex = link_info->duplex_setting;
10689         }
10690         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10691                 link_info->req_flow_ctrl =
10692                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10693         else
10694                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10695 }
10696
10697 static void bnxt_sp_task(struct work_struct *work)
10698 {
10699         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10700
10701         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10702         smp_mb__after_atomic();
10703         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10704                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10705                 return;
10706         }
10707
10708         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10709                 bnxt_cfg_rx_mode(bp);
10710
10711         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10712                 bnxt_cfg_ntp_filters(bp);
10713         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10714                 bnxt_hwrm_exec_fwd_req(bp);
10715         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10716                 bnxt_hwrm_port_qstats(bp, 0);
10717                 bnxt_hwrm_port_qstats_ext(bp, 0);
10718                 bnxt_accumulate_all_stats(bp);
10719         }
10720
10721         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10722                 int rc;
10723
10724                 mutex_lock(&bp->link_lock);
10725                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10726                                        &bp->sp_event))
10727                         bnxt_hwrm_phy_qcaps(bp);
10728
10729                 rc = bnxt_update_link(bp, true);
10730                 if (rc)
10731                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10732                                    rc);
10733
10734                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
10735                                        &bp->sp_event))
10736                         bnxt_init_ethtool_link_settings(bp);
10737                 mutex_unlock(&bp->link_lock);
10738         }
10739         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10740                 int rc;
10741
10742                 mutex_lock(&bp->link_lock);
10743                 rc = bnxt_update_phy_setting(bp);
10744                 mutex_unlock(&bp->link_lock);
10745                 if (rc) {
10746                         netdev_warn(bp->dev, "update phy settings retry failed\n");
10747                 } else {
10748                         bp->link_info.phy_retry = false;
10749                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
10750                 }
10751         }
10752         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10753                 mutex_lock(&bp->link_lock);
10754                 bnxt_get_port_module_status(bp);
10755                 mutex_unlock(&bp->link_lock);
10756         }
10757
10758         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10759                 bnxt_tc_flow_stats_work(bp);
10760
10761         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10762                 bnxt_chk_missed_irq(bp);
10763
10764         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
10765          * must be the last functions to be called before exiting.
10766          */
10767         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10768                 bnxt_reset(bp, false);
10769
10770         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10771                 bnxt_reset(bp, true);
10772
10773         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10774                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10775
10776         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10777                 if (!is_bnxt_fw_ok(bp))
10778                         bnxt_devlink_health_report(bp,
10779                                                    BNXT_FW_EXCEPTION_SP_EVENT);
10780         }
10781
10782         smp_mb__before_atomic();
10783         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10784 }
10785
10786 /* Under rtnl_lock */
10787 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10788                      int tx_xdp)
10789 {
10790         int max_rx, max_tx, tx_sets = 1;
10791         int tx_rings_needed, stats;
10792         int rx_rings = rx;
10793         int cp, vnics, rc;
10794
10795         if (tcs)
10796                 tx_sets = tcs;
10797
10798         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10799         if (rc)
10800                 return rc;
10801
10802         if (max_rx < rx)
10803                 return -ENOMEM;
10804
10805         tx_rings_needed = tx * tx_sets + tx_xdp;
10806         if (max_tx < tx_rings_needed)
10807                 return -ENOMEM;
10808
10809         vnics = 1;
10810         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10811                 vnics += rx_rings;
10812
10813         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10814                 rx_rings <<= 1;
10815         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
10816         stats = cp;
10817         if (BNXT_NEW_RM(bp)) {
10818                 cp += bnxt_get_ulp_msix_num(bp);
10819                 stats += bnxt_get_ulp_stat_ctxs(bp);
10820         }
10821         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10822                                      stats, vnics);
10823 }
10824
10825 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10826 {
10827         if (bp->bar2) {
10828                 pci_iounmap(pdev, bp->bar2);
10829                 bp->bar2 = NULL;
10830         }
10831
10832         if (bp->bar1) {
10833                 pci_iounmap(pdev, bp->bar1);
10834                 bp->bar1 = NULL;
10835         }
10836
10837         if (bp->bar0) {
10838                 pci_iounmap(pdev, bp->bar0);
10839                 bp->bar0 = NULL;
10840         }
10841 }
10842
10843 static void bnxt_cleanup_pci(struct bnxt *bp)
10844 {
10845         bnxt_unmap_bars(bp, bp->pdev);
10846         pci_release_regions(bp->pdev);
10847         if (pci_is_enabled(bp->pdev))
10848                 pci_disable_device(bp->pdev);
10849 }
10850
10851 static void bnxt_init_dflt_coal(struct bnxt *bp)
10852 {
10853         struct bnxt_coal *coal;
10854
10855         /* Tick values in microseconds.
10856          * 1 coal_buf x bufs_per_record = 1 completion record.
10857          */
10858         coal = &bp->rx_coal;
10859         coal->coal_ticks = 10;
10860         coal->coal_bufs = 30;
10861         coal->coal_ticks_irq = 1;
10862         coal->coal_bufs_irq = 2;
10863         coal->idle_thresh = 50;
10864         coal->bufs_per_record = 2;
10865         coal->budget = 64;              /* NAPI budget */
10866
10867         coal = &bp->tx_coal;
10868         coal->coal_ticks = 28;
10869         coal->coal_bufs = 30;
10870         coal->coal_ticks_irq = 2;
10871         coal->coal_bufs_irq = 2;
10872         coal->bufs_per_record = 1;
10873
10874         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10875 }
10876
10877 static void bnxt_alloc_fw_health(struct bnxt *bp)
10878 {
10879         if (bp->fw_health)
10880                 return;
10881
10882         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10883             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10884                 return;
10885
10886         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10887         if (!bp->fw_health) {
10888                 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10889                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10890                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10891         }
10892 }
10893
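/* Phase 1 of firmware init: query the HWRM version and capabilities, set up
 * the KONG and short command channels when supported, reset the function
 * and set the firmware time.
 */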
10894 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10895 {
10896         int rc;
10897
10898         bp->fw_cap = 0;
10899         rc = bnxt_hwrm_ver_get(bp);
10900         if (rc)
10901                 return rc;
10902
10903         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10904                 rc = bnxt_alloc_kong_hwrm_resources(bp);
10905                 if (rc)
10906                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10907         }
10908
10909         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10910             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10911                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10912                 if (rc)
10913                         return rc;
10914         }
10915         rc = bnxt_hwrm_func_reset(bp);
10916         if (rc)
10917                 return -ENODEV;
10918
10919         bnxt_hwrm_fw_set_time(bp);
10920         return 0;
10921 }
10922
10923 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10924 {
10925         int rc;
10926
10927         /* Get the MAX capabilities for this function */
10928         rc = bnxt_hwrm_func_qcaps(bp);
10929         if (rc) {
10930                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10931                            rc);
10932                 return -ENODEV;
10933         }
10934
10935         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10936         if (rc)
10937                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10938                             rc);
10939
10940         bnxt_alloc_fw_health(bp);
10941         rc = bnxt_hwrm_error_recovery_qcfg(bp);
10942         if (rc)
10943                 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10944                             rc);
10945
10946         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
10947         if (rc)
10948                 return -ENODEV;
10949
10950         bnxt_hwrm_func_qcfg(bp);
10951         bnxt_hwrm_vnic_qcaps(bp);
10952         bnxt_hwrm_port_led_qcaps(bp);
10953         bnxt_ethtool_init(bp);
10954         bnxt_dcb_init(bp);
10955         return 0;
10956 }
10957
10958 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10959 {
10960         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10961         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10962                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10963                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10964                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10965         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
10966                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10967                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10968                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10969         }
10970 }
10971
10972 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10973 {
10974         struct net_device *dev = bp->dev;
10975
10976         dev->hw_features &= ~NETIF_F_NTUPLE;
10977         dev->features &= ~NETIF_F_NTUPLE;
10978         bp->flags &= ~BNXT_FLAG_RFS;
10979         if (bnxt_rfs_supported(bp)) {
10980                 dev->hw_features |= NETIF_F_NTUPLE;
10981                 if (bnxt_rfs_capable(bp)) {
10982                         bp->flags |= BNXT_FLAG_RFS;
10983                         dev->features |= NETIF_F_NTUPLE;
10984                 }
10985         }
10986 }
10987
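      /* Firmware init phase 3: apply the default RSS hash type and RFS
       * features, Wake-on-LAN state, cache line size, and query coalescing
       * capabilities.
       */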
10988 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10989 {
10990         struct pci_dev *pdev = bp->pdev;
10991
10992         bnxt_set_dflt_rss_hash_type(bp);
10993         bnxt_set_dflt_rfs(bp);
10994
10995         bnxt_get_wol_settings(bp);
10996         if (bp->flags & BNXT_FLAG_WOL_CAP)
10997                 device_set_wakeup_enable(&pdev->dev, bp->wol);
10998         else
10999                 device_set_wakeup_capable(&pdev->dev, false);
11000
11001         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11002         bnxt_hwrm_coal_params_qcaps(bp);
11003 }
11004
11005 static int bnxt_fw_init_one(struct bnxt *bp)
11006 {
11007         int rc;
11008
11009         rc = bnxt_fw_init_one_p1(bp);
11010         if (rc) {
11011                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11012                 return rc;
11013         }
11014         rc = bnxt_fw_init_one_p2(bp);
11015         if (rc) {
11016                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11017                 return rc;
11018         }
11019         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11020         if (rc)
11021                 return rc;
11022
11023         /* In case fw capabilities have changed, destroy the unneeded
11024          * reporters and create newly capable ones.
11025          */
11026         bnxt_dl_fw_reporters_destroy(bp, false);
11027         bnxt_dl_fw_reporters_create(bp);
11028         bnxt_fw_init_one_p3(bp);
11029         return 0;
11030 }
11031
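      /* Execute one step of the firmware-provided reset sequence: write the
       * step's value to the register described by fw_reset_seq_regs[reg_idx],
       * then optionally do a config space read and delay before the next step.
       */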
11032 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11033 {
11034         struct bnxt_fw_health *fw_health = bp->fw_health;
11035         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11036         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11037         u32 reg_type, reg_off, delay_msecs;
11038
11039         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11040         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11041         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11042         switch (reg_type) {
11043         case BNXT_FW_HEALTH_REG_TYPE_CFG:
11044                 pci_write_config_dword(bp->pdev, reg_off, val);
11045                 break;
11046         case BNXT_FW_HEALTH_REG_TYPE_GRC:
11047                 writel(reg_off & BNXT_GRC_BASE_MASK,
11048                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11049                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11050                 fallthrough;
11051         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11052                 writel(val, bp->bar0 + reg_off);
11053                 break;
11054         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11055                 writel(val, bp->bar1 + reg_off);
11056                 break;
11057         }
11058         if (delay_msecs) {
11059                 pci_read_config_dword(bp->pdev, 0, &val);
11060                 msleep(delay_msecs);
11061         }
11062 }
11063
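      /* Reset the firmware/chip using whichever mechanism the firmware
       * advertises: a TEE-assisted reload, a host register write sequence,
       * or an HWRM_FW_RESET request to the embedded processor.
       */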
11064 static void bnxt_reset_all(struct bnxt *bp)
11065 {
11066         struct bnxt_fw_health *fw_health = bp->fw_health;
11067         int i, rc;
11068
11069         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11070 #ifdef CONFIG_TEE_BNXT_FW
11071                 rc = tee_bnxt_fw_load();
11072                 if (rc)
11073                         netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
11074                 bp->fw_reset_timestamp = jiffies;
11075 #endif
11076                 return;
11077         }
11078
11079         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11080                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11081                         bnxt_fw_reset_writel(bp, i);
11082         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11083                 struct hwrm_fw_reset_input req = {0};
11084
11085                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11086                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11087                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11088                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11089                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11090                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11091                 if (rc)
11092                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11093         }
11094         bp->fw_reset_timestamp = jiffies;
11095 }
11096
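      /* Firmware reset state machine, run from a delayed work item.  It walks
       * through the POLL_VF, POLL_FW_DOWN, RESET_FW, ENABLE_DEV, POLL_FW and
       * OPENING states, re-queuing itself until the reset completes or is
       * aborted.
       */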
11097 static void bnxt_fw_reset_task(struct work_struct *work)
11098 {
11099         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11100         int rc;
11101
11102         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11103                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11104                 return;
11105         }
11106
11107         switch (bp->fw_reset_state) {
11108         case BNXT_FW_RESET_STATE_POLL_VF: {
11109                 int n = bnxt_get_registered_vfs(bp);
11110                 int tmo;
11111
11112                 if (n < 0) {
11113                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11114                                    n, jiffies_to_msecs(jiffies -
11115                                    bp->fw_reset_timestamp));
11116                         goto fw_reset_abort;
11117                 } else if (n > 0) {
11118                         if (time_after(jiffies, bp->fw_reset_timestamp +
11119                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
11120                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11121                                 bp->fw_reset_state = 0;
11122                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11123                                            n);
11124                                 return;
11125                         }
11126                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11127                         return;
11128                 }
11129                 bp->fw_reset_timestamp = jiffies;
11130                 rtnl_lock();
11131                 bnxt_fw_reset_close(bp);
11132                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11133                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11134                         tmo = HZ / 10;
11135                 } else {
11136                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11137                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11138                 }
11139                 rtnl_unlock();
11140                 bnxt_queue_fw_reset_work(bp, tmo);
11141                 return;
11142         }
11143         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
11144                 u32 val;
11145
11146                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11147                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
11148                     !time_after(jiffies, bp->fw_reset_timestamp +
11149                     (bp->fw_reset_max_dsecs * HZ / 10))) {
11150                         bnxt_queue_fw_reset_work(bp, HZ / 5);
11151                         return;
11152                 }
11153
11154                 if (!bp->fw_health->master) {
11155                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11156
11157                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11158                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11159                         return;
11160                 }
11161                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11162         }
11163                 fallthrough;
11164         case BNXT_FW_RESET_STATE_RESET_FW:
11165                 bnxt_reset_all(bp);
11166                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11167                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11168                 return;
11169         case BNXT_FW_RESET_STATE_ENABLE_DEV:
11170                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11171                         u32 val;
11172
11173                         val = bnxt_fw_health_readl(bp,
11174                                                    BNXT_FW_RESET_INPROG_REG);
11175                         if (val)
11176                                 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
11177                                             val);
11178                 }
11179                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11180                 if (pci_enable_device(bp->pdev)) {
11181                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11182                         goto fw_reset_abort;
11183                 }
11184                 pci_set_master(bp->pdev);
11185                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11186                 fallthrough;
11187         case BNXT_FW_RESET_STATE_POLL_FW:
11188                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11189                 rc = __bnxt_hwrm_ver_get(bp, true);
11190                 if (rc) {
11191                         if (time_after(jiffies, bp->fw_reset_timestamp +
11192                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
11193                                 netdev_err(bp->dev, "Firmware reset aborted\n");
11194                                 goto fw_reset_abort;
11195                         }
11196                         bnxt_queue_fw_reset_work(bp, HZ / 5);
11197                         return;
11198                 }
11199                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11200                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11201                 fallthrough;
11202         case BNXT_FW_RESET_STATE_OPENING:
11203                 while (!rtnl_trylock()) {
11204                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11205                         return;
11206                 }
11207                 rc = bnxt_open(bp->dev);
11208                 if (rc) {
11209                         netdev_err(bp->dev, "bnxt_open() failed\n");
11210                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11211                         dev_close(bp->dev);
11212                 }
11213
11214                 bp->fw_reset_state = 0;
11215                 /* Make sure fw_reset_state is 0 before clearing the flag */
11216                 smp_mb__before_atomic();
11217                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11218                 bnxt_ulp_start(bp, rc);
11219                 if (!rc)
11220                         bnxt_reenable_sriov(bp);
11221                 bnxt_dl_health_recovery_done(bp);
11222                 bnxt_dl_health_status_update(bp, true);
11223                 rtnl_unlock();
11224                 break;
11225         }
11226         return;
11227
11228 fw_reset_abort:
11229         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11230         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11231                 bnxt_dl_health_status_update(bp, false);
11232         bp->fw_reset_state = 0;
11233         rtnl_lock();
11234         dev_close(bp->dev);
11235         rtnl_unlock();
11236 }
11237
11238 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
11239 {
11240         int rc;
11241         struct bnxt *bp = netdev_priv(dev);
11242
11243         SET_NETDEV_DEV(dev, &pdev->dev);
11244
11245         /* enable device (incl. PCI PM wakeup), and bus-mastering */
11246         rc = pci_enable_device(pdev);
11247         if (rc) {
11248                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
11249                 goto init_err;
11250         }
11251
11252         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11253                 dev_err(&pdev->dev,
11254                         "Cannot find PCI device base address, aborting\n");
11255                 rc = -ENODEV;
11256                 goto init_err_disable;
11257         }
11258
11259         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11260         if (rc) {
11261                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
11262                 goto init_err_disable;
11263         }
11264
11265         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
11266             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11267                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                      rc = -EIO;
11268                 goto init_err_disable;
11269         }
11270
11271         pci_set_master(pdev);
11272
11273         bp->dev = dev;
11274         bp->pdev = pdev;
11275
11276         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
11277          * determines the BAR size.
11278          */
11279         bp->bar0 = pci_ioremap_bar(pdev, 0);
11280         if (!bp->bar0) {
11281                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
11282                 rc = -ENOMEM;
11283                 goto init_err_release;
11284         }
11285
11286         bp->bar2 = pci_ioremap_bar(pdev, 4);
11287         if (!bp->bar2) {
11288                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
11289                 rc = -ENOMEM;
11290                 goto init_err_release;
11291         }
11292
11293         pci_enable_pcie_error_reporting(pdev);
11294
11295         INIT_WORK(&bp->sp_task, bnxt_sp_task);
11296         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
11297
11298         spin_lock_init(&bp->ntp_fltr_lock);
11299 #if BITS_PER_LONG == 32
11300         spin_lock_init(&bp->db_lock);
11301 #endif
11302
11303         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
11304         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
11305
11306         bnxt_init_dflt_coal(bp);
11307
11308         timer_setup(&bp->timer, bnxt_timer, 0);
11309         bp->current_interval = BNXT_TIMER_INTERVAL;
11310
11311         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
11312         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
11313
11314         clear_bit(BNXT_STATE_OPEN, &bp->state);
11315         return 0;
11316
11317 init_err_release:
11318         bnxt_unmap_bars(bp, pdev);
11319         pci_release_regions(pdev);
11320
11321 init_err_disable:
11322         pci_disable_device(pdev);
11323
11324 init_err:
11325         return rc;
11326 }
11327
11328 /* rtnl_lock held */
11329 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
11330 {
11331         struct sockaddr *addr = p;
11332         struct bnxt *bp = netdev_priv(dev);
11333         int rc = 0;
11334
11335         if (!is_valid_ether_addr(addr->sa_data))
11336                 return -EADDRNOTAVAIL;
11337
11338         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
11339                 return 0;
11340
11341         rc = bnxt_approve_mac(bp, addr->sa_data, true);
11342         if (rc)
11343                 return rc;
11344
11345         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11346         if (netif_running(dev)) {
11347                 bnxt_close_nic(bp, false, false);
11348                 rc = bnxt_open_nic(bp, false, false);
11349         }
11350
11351         return rc;
11352 }
11353
11354 /* rtnl_lock held */
11355 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
11356 {
11357         struct bnxt *bp = netdev_priv(dev);
11358
11359         if (netif_running(dev))
11360                 bnxt_close_nic(bp, true, false);
11361
11362         dev->mtu = new_mtu;
11363         bnxt_set_ring_params(bp);
11364
11365         if (netif_running(dev))
11366                 return bnxt_open_nic(bp, true, false);
11367
11368         return 0;
11369 }
11370
11371 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
11372 {
11373         struct bnxt *bp = netdev_priv(dev);
11374         bool sh = false;
11375         int rc;
11376
11377         if (tc > bp->max_tc) {
11378                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
11379                            tc, bp->max_tc);
11380                 return -EINVAL;
11381         }
11382
11383         if (netdev_get_num_tc(dev) == tc)
11384                 return 0;
11385
11386         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11387                 sh = true;
11388
11389         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11390                               sh, tc, bp->tx_nr_rings_xdp);
11391         if (rc)
11392                 return rc;
11393
11394         /* Need to close the device and do hw resource re-allocations */
11395         if (netif_running(bp->dev))
11396                 bnxt_close_nic(bp, true, false);
11397
11398         if (tc) {
11399                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11400                 netdev_set_num_tc(dev, tc);
11401         } else {
11402                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11403                 netdev_reset_tc(dev);
11404         }
11405         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11406         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11407                                bp->tx_nr_rings + bp->rx_nr_rings;
11408
11409         if (netif_running(bp->dev))
11410                 return bnxt_open_nic(bp, true, false);
11411
11412         return 0;
11413 }
11414
11415 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11416                                   void *cb_priv)
11417 {
11418         struct bnxt *bp = cb_priv;
11419
11420         if (!bnxt_tc_flower_enabled(bp) ||
11421             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11422                 return -EOPNOTSUPP;
11423
11424         switch (type) {
11425         case TC_SETUP_CLSFLOWER:
11426                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11427         default:
11428                 return -EOPNOTSUPP;
11429         }
11430 }
11431
11432 LIST_HEAD(bnxt_block_cb_list);
11433
11434 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11435                          void *type_data)
11436 {
11437         struct bnxt *bp = netdev_priv(dev);
11438
11439         switch (type) {
11440         case TC_SETUP_BLOCK:
11441                 return flow_block_cb_setup_simple(type_data,
11442                                                   &bnxt_block_cb_list,
11443                                                   bnxt_setup_tc_block_cb,
11444                                                   bp, bp, true);
11445         case TC_SETUP_QDISC_MQPRIO: {
11446                 struct tc_mqprio_qopt *mqprio = type_data;
11447
11448                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11449
11450                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11451         }
11452         default:
11453                 return -EOPNOTSUPP;
11454         }
11455 }
11456
11457 #ifdef CONFIG_RFS_ACCEL
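      /* Return true if two ntuple filters match on protocol, IP addresses,
       * ports, control flags and MAC addresses.
       */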
11458 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11459                             struct bnxt_ntuple_filter *f2)
11460 {
11461         struct flow_keys *keys1 = &f1->fkeys;
11462         struct flow_keys *keys2 = &f2->fkeys;
11463
11464         if (keys1->basic.n_proto != keys2->basic.n_proto ||
11465             keys1->basic.ip_proto != keys2->basic.ip_proto)
11466                 return false;
11467
11468         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
11469                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11470                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11471                         return false;
11472         } else {
11473                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11474                            sizeof(keys1->addrs.v6addrs.src)) ||
11475                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11476                            sizeof(keys1->addrs.v6addrs.dst)))
11477                         return false;
11478         }
11479
11480         if (keys1->ports.ports == keys2->ports.ports &&
11481             keys1->control.flags == keys2->control.flags &&
11482             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11483             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11484                 return true;
11485
11486         return false;
11487 }
11488
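      /* aRFS flow steering callback.  Dissect the skb, reject unsupported
       * protocols, and add a new ntuple filter for the flow unless an
       * identical filter already exists.  The filter is programmed into the
       * hardware later from the sp_task via bnxt_cfg_ntp_filters().
       */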
11489 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11490                               u16 rxq_index, u32 flow_id)
11491 {
11492         struct bnxt *bp = netdev_priv(dev);
11493         struct bnxt_ntuple_filter *fltr, *new_fltr;
11494         struct flow_keys *fkeys;
11495         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11496         int rc = 0, idx, bit_id, l2_idx = 0;
11497         struct hlist_head *head;
11498         u32 flags;
11499
11500         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11501                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11502                 int off = 0, j;
11503
11504                 netif_addr_lock_bh(dev);
11505                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11506                         if (ether_addr_equal(eth->h_dest,
11507                                              vnic->uc_list + off)) {
11508                                 l2_idx = j + 1;
11509                                 break;
11510                         }
11511                 }
11512                 netif_addr_unlock_bh(dev);
11513                 if (!l2_idx)
11514                         return -EINVAL;
11515         }
11516         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11517         if (!new_fltr)
11518                 return -ENOMEM;
11519
11520         fkeys = &new_fltr->fkeys;
11521         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11522                 rc = -EPROTONOSUPPORT;
11523                 goto err_free;
11524         }
11525
11526         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11527              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11528             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11529              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11530                 rc = -EPROTONOSUPPORT;
11531                 goto err_free;
11532         }
11533         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11534             bp->hwrm_spec_code < 0x10601) {
11535                 rc = -EPROTONOSUPPORT;
11536                 goto err_free;
11537         }
11538         flags = fkeys->control.flags;
11539         if (((flags & FLOW_DIS_ENCAPSULATION) &&
11540              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
11541                 rc = -EPROTONOSUPPORT;
11542                 goto err_free;
11543         }
11544
11545         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11546         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11547
11548         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11549         head = &bp->ntp_fltr_hash_tbl[idx];
11550         rcu_read_lock();
11551         hlist_for_each_entry_rcu(fltr, head, hash) {
11552                 if (bnxt_fltr_match(fltr, new_fltr)) {
11553                         rcu_read_unlock();
11554                         rc = 0;
11555                         goto err_free;
11556                 }
11557         }
11558         rcu_read_unlock();
11559
11560         spin_lock_bh(&bp->ntp_fltr_lock);
11561         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11562                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
11563         if (bit_id < 0) {
11564                 spin_unlock_bh(&bp->ntp_fltr_lock);
11565                 rc = -ENOMEM;
11566                 goto err_free;
11567         }
11568
11569         new_fltr->sw_id = (u16)bit_id;
11570         new_fltr->flow_id = flow_id;
11571         new_fltr->l2_fltr_idx = l2_idx;
11572         new_fltr->rxq = rxq_index;
11573         hlist_add_head_rcu(&new_fltr->hash, head);
11574         bp->ntp_fltr_count++;
11575         spin_unlock_bh(&bp->ntp_fltr_lock);
11576
11577         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11578         bnxt_queue_sp_work(bp);
11579
11580         return new_fltr->sw_id;
11581
11582 err_free:
11583         kfree(new_fltr);
11584         return rc;
11585 }
11586
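      /* Walk the ntuple filter hash table from the sp_task: program new
       * filters into the hardware and free filters whose flows have expired.
       */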
11587 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11588 {
11589         int i;
11590
11591         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11592                 struct hlist_head *head;
11593                 struct hlist_node *tmp;
11594                 struct bnxt_ntuple_filter *fltr;
11595                 int rc;
11596
11597                 head = &bp->ntp_fltr_hash_tbl[i];
11598                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11599                         bool del = false;
11600
11601                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11602                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11603                                                         fltr->flow_id,
11604                                                         fltr->sw_id)) {
11605                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
11606                                                                          fltr);
11607                                         del = true;
11608                                 }
11609                         } else {
11610                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11611                                                                        fltr);
11612                                 if (rc)
11613                                         del = true;
11614                                 else
11615                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
11616                         }
11617
11618                         if (del) {
11619                                 spin_lock_bh(&bp->ntp_fltr_lock);
11620                                 hlist_del_rcu(&fltr->hash);
11621                                 bp->ntp_fltr_count--;
11622                                 spin_unlock_bh(&bp->ntp_fltr_lock);
11623                                 synchronize_rcu();
11624                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11625                                 kfree(fltr);
11626                         }
11627                 }
11628         }
11629         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11630                 netdev_info(bp->dev, "Received PF driver unload event!\n");
11631 }
11632
11633 #else
11634
11635 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11636 {
11637 }
11638
11639 #endif /* CONFIG_RFS_ACCEL */
11640
11641 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
11642 {
11643         struct bnxt *bp = netdev_priv(netdev);
11644         struct udp_tunnel_info ti;
11645         unsigned int cmd;
11646
11647         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
11648         if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
11649                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
11650         else
11651                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
11652
11653         if (ti.port)
11654                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
11655
11656         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
11657 }
11658
11659 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
11660         .sync_table     = bnxt_udp_tunnel_sync,
11661         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
11662                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
11663         .tables         = {
11664                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
11665                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
11666         },
11667 };
11668
11669 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11670                                struct net_device *dev, u32 filter_mask,
11671                                int nlflags)
11672 {
11673         struct bnxt *bp = netdev_priv(dev);
11674
11675         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11676                                        nlflags, filter_mask, NULL);
11677 }
11678
11679 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
11680                                u16 flags, struct netlink_ext_ack *extack)
11681 {
11682         struct bnxt *bp = netdev_priv(dev);
11683         struct nlattr *attr, *br_spec;
11684         int rem, rc = 0;
11685
11686         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11687                 return -EOPNOTSUPP;
11688
11689         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11690         if (!br_spec)
11691                 return -EINVAL;
11692
11693         nla_for_each_nested(attr, br_spec, rem) {
11694                 u16 mode;
11695
11696                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11697                         continue;
11698
11699                 if (nla_len(attr) < sizeof(mode))
11700                         return -EINVAL;
11701
11702                 mode = nla_get_u16(attr);
11703                 if (mode == bp->br_mode)
11704                         break;
11705
11706                 rc = bnxt_hwrm_set_br_mode(bp, mode);
11707                 if (!rc)
11708                         bp->br_mode = mode;
11709                 break;
11710         }
11711         return rc;
11712 }
11713
11714 int bnxt_get_port_parent_id(struct net_device *dev,
11715                             struct netdev_phys_item_id *ppid)
11716 {
11717         struct bnxt *bp = netdev_priv(dev);
11718
11719         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11720                 return -EOPNOTSUPP;
11721
11722         /* The PF and its VF-reps only support the switchdev framework */
11723         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
11724                 return -EOPNOTSUPP;
11725
11726         ppid->id_len = sizeof(bp->dsn);
11727         memcpy(ppid->id, bp->dsn, ppid->id_len);
11728
11729         return 0;
11730 }
11731
11732 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11733 {
11734         struct bnxt *bp = netdev_priv(dev);
11735
11736         return &bp->dl_port;
11737 }
11738
11739 static const struct net_device_ops bnxt_netdev_ops = {
11740         .ndo_open               = bnxt_open,
11741         .ndo_start_xmit         = bnxt_start_xmit,
11742         .ndo_stop               = bnxt_close,
11743         .ndo_get_stats64        = bnxt_get_stats64,
11744         .ndo_set_rx_mode        = bnxt_set_rx_mode,
11745         .ndo_do_ioctl           = bnxt_ioctl,
11746         .ndo_validate_addr      = eth_validate_addr,
11747         .ndo_set_mac_address    = bnxt_change_mac_addr,
11748         .ndo_change_mtu         = bnxt_change_mtu,
11749         .ndo_fix_features       = bnxt_fix_features,
11750         .ndo_set_features       = bnxt_set_features,
11751         .ndo_tx_timeout         = bnxt_tx_timeout,
11752 #ifdef CONFIG_BNXT_SRIOV
11753         .ndo_get_vf_config      = bnxt_get_vf_config,
11754         .ndo_set_vf_mac         = bnxt_set_vf_mac,
11755         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
11756         .ndo_set_vf_rate        = bnxt_set_vf_bw,
11757         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
11758         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
11759         .ndo_set_vf_trust       = bnxt_set_vf_trust,
11760 #endif
11761         .ndo_setup_tc           = bnxt_setup_tc,
11762 #ifdef CONFIG_RFS_ACCEL
11763         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
11764 #endif
11765         .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
11766         .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
11767         .ndo_bpf                = bnxt_xdp,
11768         .ndo_xdp_xmit           = bnxt_xdp_xmit,
11769         .ndo_bridge_getlink     = bnxt_bridge_getlink,
11770         .ndo_bridge_setlink     = bnxt_bridge_setlink,
11771         .ndo_get_devlink_port   = bnxt_get_devlink_port,
11772 };
11773
11774 static void bnxt_remove_one(struct pci_dev *pdev)
11775 {
11776         struct net_device *dev = pci_get_drvdata(pdev);
11777         struct bnxt *bp = netdev_priv(dev);
11778
11779         if (BNXT_PF(bp))
11780                 bnxt_sriov_disable(bp);
11781
11782         bnxt_dl_fw_reporters_destroy(bp, true);
11783         if (BNXT_PF(bp))
11784                 devlink_port_type_clear(&bp->dl_port);
11785         pci_disable_pcie_error_reporting(pdev);
11786         unregister_netdev(dev);
11787         bnxt_dl_unregister(bp);
11788         bnxt_shutdown_tc(bp);
11789         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11790         bnxt_cancel_sp_work(bp);
11791         bp->sp_event = 0;
11792
11793         bnxt_clear_int_mode(bp);
11794         bnxt_hwrm_func_drv_unrgtr(bp);
11795         bnxt_free_hwrm_resources(bp);
11796         bnxt_free_hwrm_short_cmd_req(bp);
11797         bnxt_ethtool_free(bp);
11798         bnxt_dcb_free(bp);
11799         kfree(bp->edev);
11800         bp->edev = NULL;
11801         kfree(bp->fw_health);
11802         bp->fw_health = NULL;
11803         bnxt_cleanup_pci(bp);
11804         bnxt_free_ctx_mem(bp);
11805         kfree(bp->ctx);
11806         bp->ctx = NULL;
11807         kfree(bp->rss_indir_tbl);
11808         bp->rss_indir_tbl = NULL;
11809         bnxt_free_port_stats(bp);
11810         free_netdev(dev);
11811 }
11812
11813 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
11814 {
11815         int rc = 0;
11816         struct bnxt_link_info *link_info = &bp->link_info;
11817
11818         rc = bnxt_hwrm_phy_qcaps(bp);
11819         if (rc) {
11820                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
11821                            rc);
11822                 return rc;
11823         }
11824         if (!fw_dflt)
11825                 return 0;
11826
11827         rc = bnxt_update_link(bp, false);
11828         if (rc) {
11829                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
11830                            rc);
11831                 return rc;
11832         }
11833
11834         /* Older firmware does not have supported_auto_speeds, so assume
11835          * that all supported speeds can be autonegotiated.
11836          */
11837         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11838                 link_info->support_auto_speeds = link_info->support_speeds;
11839
11840         bnxt_init_ethtool_link_settings(bp);
11841         return 0;
11842 }
11843
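      /* Return the number of MSI-X vectors the device supports, or 1 if
       * MSI-X is not available.
       */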
11844 static int bnxt_get_max_irq(struct pci_dev *pdev)
11845 {
11846         u16 ctrl;
11847
11848         if (!pdev->msix_cap)
11849                 return 1;
11850
11851         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11852         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11853 }
11854
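      /* Compute the maximum RX, TX and completion rings this function can
       * use, based on the hardware resources reported by firmware, the
       * available IRQs and the chip type.
       */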
11855 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11856                                 int *max_cp)
11857 {
11858         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11859         int max_ring_grps = 0, max_irq;
11860
11861         *max_tx = hw_resc->max_tx_rings;
11862         *max_rx = hw_resc->max_rx_rings;
11863         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11864         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11865                         bnxt_get_ulp_msix_num(bp),
11866                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
11867         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11868                 *max_cp = min_t(int, *max_cp, max_irq);
11869         max_ring_grps = hw_resc->max_hw_ring_grps;
11870         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11871                 *max_cp -= 1;
11872                 *max_rx -= 2;
11873         }
11874         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11875                 *max_rx >>= 1;
11876         if (bp->flags & BNXT_FLAG_CHIP_P5) {
11877                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11878                 /* On P5 chips, max_cp output param should be available NQs */
11879                 *max_cp = max_irq;
11880         }
11881         *max_rx = min_t(int, *max_rx, max_ring_grps);
11882 }
11883
11884 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11885 {
11886         int rx, tx, cp;
11887
11888         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
11889         *max_rx = rx;
11890         *max_tx = tx;
11891         if (!rx || !tx || !cp)
11892                 return -ENOMEM;
11893
11894         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11895 }
11896
11897 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11898                                bool shared)
11899 {
11900         int rc;
11901
11902         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11903         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11904                 /* Not enough rings, try disabling agg rings. */
11905                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11906                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11907                 if (rc) {
11908                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
11909                         bp->flags |= BNXT_FLAG_AGG_RINGS;
11910                         return rc;
11911                 }
11912                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
11913                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11914                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11915                 bnxt_set_ring_params(bp);
11916         }
11917
11918         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11919                 int max_cp, max_stat, max_irq;
11920
11921                 /* Reserve minimum resources for RoCE */
11922                 max_cp = bnxt_get_max_func_cp_rings(bp);
11923                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11924                 max_irq = bnxt_get_max_func_irqs(bp);
11925                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11926                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11927                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11928                         return 0;
11929
11930                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11931                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11932                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11933                 max_cp = min_t(int, max_cp, max_irq);
11934                 max_cp = min_t(int, max_cp, max_stat);
11935                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11936                 if (rc)
11937                         rc = 0;
11938         }
11939         return rc;
11940 }
11941
11942 /* In initial default shared ring setting, each shared ring must have a
11943  * RX/TX ring pair.
11944  */
11945 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11946 {
11947         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11948         bp->rx_nr_rings = bp->cp_nr_rings;
11949         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11950         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11951 }
11952
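      /* Pick the default RX/TX ring counts from the CPU count (capped per
       * port on multi-port cards) and the available hardware resources,
       * then reserve them with the firmware.
       */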
11953 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
11954 {
11955         int dflt_rings, max_rx_rings, max_tx_rings, rc;
11956
11957         if (!bnxt_can_reserve_rings(bp))
11958                 return 0;
11959
11960         if (sh)
11961                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
11962         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
11963         /* Reduce default rings on multi-port cards so that total default
11964          * rings do not exceed CPU count.
11965          */
11966         if (bp->port_count > 1) {
11967                 int max_rings =
11968                         max_t(int, num_online_cpus() / bp->port_count, 1);
11969
11970                 dflt_rings = min_t(int, dflt_rings, max_rings);
11971         }
11972         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
11973         if (rc)
11974                 return rc;
11975         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11976         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
11977         if (sh)
11978                 bnxt_trim_dflt_sh_rings(bp);
11979         else
11980                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11981         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11982
11983         rc = __bnxt_reserve_rings(bp);
11984         if (rc)
11985                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
11986         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11987         if (sh)
11988                 bnxt_trim_dflt_sh_rings(bp);
11989
11990         /* Rings may have been trimmed, re-reserve the trimmed rings. */
11991         if (bnxt_need_reserve_rings(bp)) {
11992                 rc = __bnxt_reserve_rings(bp);
11993                 if (rc)
11994                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11995                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11996         }
11997         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11998                 bp->rx_nr_rings++;
11999                 bp->cp_nr_rings++;
12000         }
12001         if (rc) {
12002                 bp->tx_nr_rings = 0;
12003                 bp->rx_nr_rings = 0;
12004         }
12005         return rc;
12006 }
12007
12008 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12009 {
12010         int rc;
12011
12012         if (bp->tx_nr_rings)
12013                 return 0;
12014
12015         bnxt_ulp_irq_stop(bp);
12016         bnxt_clear_int_mode(bp);
12017         rc = bnxt_set_dflt_rings(bp, true);
12018         if (rc) {
12019                 netdev_err(bp->dev, "Not enough rings available.\n");
12020                 goto init_dflt_ring_err;
12021         }
12022         rc = bnxt_init_int_mode(bp);
12023         if (rc)
12024                 goto init_dflt_ring_err;
12025
12026         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12027         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12028                 bp->flags |= BNXT_FLAG_RFS;
12029                 bp->dev->features |= NETIF_F_NTUPLE;
12030         }
12031 init_dflt_ring_err:
12032         bnxt_ulp_irq_restart(bp, rc);
12033         return rc;
12034 }
12035
12036 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12037 {
12038         int rc;
12039
12040         ASSERT_RTNL();
12041         bnxt_hwrm_func_qcaps(bp);
12042
12043         if (netif_running(bp->dev))
12044                 __bnxt_close_nic(bp, true, false);
12045
12046         bnxt_ulp_irq_stop(bp);
12047         bnxt_clear_int_mode(bp);
12048         rc = bnxt_init_int_mode(bp);
12049         bnxt_ulp_irq_restart(bp, rc);
12050
12051         if (netif_running(bp->dev)) {
12052                 if (rc)
12053                         dev_close(bp->dev);
12054                 else
12055                         rc = bnxt_open_nic(bp, true, false);
12056         }
12057
12058         return rc;
12059 }
12060
12061 static int bnxt_init_mac_addr(struct bnxt *bp)
12062 {
12063         int rc = 0;
12064
12065         if (BNXT_PF(bp)) {
12066                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12067         } else {
12068 #ifdef CONFIG_BNXT_SRIOV
12069                 struct bnxt_vf_info *vf = &bp->vf;
12070                 bool strict_approval = true;
12071
12072                 if (is_valid_ether_addr(vf->mac_addr)) {
12073                         /* overwrite netdev dev_addr with admin VF MAC */
12074                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12075                         /* Older PF driver or firmware may not approve this
12076                          * correctly.
12077                          */
12078                         strict_approval = false;
12079                 } else {
12080                         eth_hw_addr_random(bp->dev);
12081                 }
12082                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12083 #endif
12084         }
12085         return rc;
12086 }
12087
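      /* Read the board part number and serial number strings from the PCI
       * VPD read-only section.
       */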
12088 #define BNXT_VPD_LEN    512
12089 static void bnxt_vpd_read_info(struct bnxt *bp)
12090 {
12091         struct pci_dev *pdev = bp->pdev;
12092         int i, len, pos, ro_size;
12093         ssize_t vpd_size;
12094         u8 *vpd_data;
12095
12096         vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
12097         if (!vpd_data)
12098                 return;
12099
12100         vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
12101         if (vpd_size <= 0) {
12102                 netdev_err(bp->dev, "Unable to read VPD\n");
12103                 goto exit;
12104         }
12105
12106         i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
12107         if (i < 0) {
12108                 netdev_err(bp->dev, "VPD READ-Only not found\n");
12109                 goto exit;
12110         }
12111
12112         ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
12113         i += PCI_VPD_LRDT_TAG_SIZE;
12114         if (i + ro_size > vpd_size)
12115                 goto exit;
12116
12117         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12118                                         PCI_VPD_RO_KEYWORD_PARTNO);
12119         if (pos < 0)
12120                 goto read_sn;
12121
12122         len = pci_vpd_info_field_size(&vpd_data[pos]);
12123         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12124         if (len + pos > vpd_size)
12125                 goto read_sn;
12126
12127         strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
12128
12129 read_sn:
12130         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12131                                         PCI_VPD_RO_KEYWORD_SERIALNO);
12132         if (pos < 0)
12133                 goto exit;
12134
12135         len = pci_vpd_info_field_size(&vpd_data[pos]);
12136         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12137         if (len + pos > vpd_size)
12138                 goto exit;
12139
12140         strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
12141 exit:
12142         kfree(vpd_data);
12143 }
12144
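      /* Read the PCIe Device Serial Number into dsn[] and mark it valid;
       * it is used as the eswitch switch_id.
       */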
12145 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12146 {
12147         struct pci_dev *pdev = bp->pdev;
12148         u64 qword;
12149
12150         qword = pci_get_dsn(pdev);
12151         if (!qword) {
12152                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12153                 return -EOPNOTSUPP;
12154         }
12155
12156         put_unaligned_le64(qword, dsn);
12157
12158         bp->flags |= BNXT_FLAG_DSN_VALID;
12159         return 0;
12160 }
12161
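      /* Map the doorbell BAR (BAR 2); bp->db_size is determined during
       * bnxt_fw_init_one_p2().
       */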
12162 static int bnxt_map_db_bar(struct bnxt *bp)
12163 {
12164         if (!bp->db_size)
12165                 return -ENODEV;
12166         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12167         if (!bp->bar1)
12168                 return -ENOMEM;
12169         return 0;
12170 }
12171
12172 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12173 {
12174         struct net_device *dev;
12175         struct bnxt *bp;
12176         int rc, max_irqs;
12177
12178         if (pci_is_bridge(pdev))
12179                 return -ENODEV;
12180
12181         /* Clear any pending DMA transactions from the crash kernel
12182          * while loading the driver in the capture kernel.
12183          */
12184         if (is_kdump_kernel()) {
12185                 pci_clear_master(pdev);
12186                 pcie_flr(pdev);
12187         }
12188
12189         max_irqs = bnxt_get_max_irq(pdev);
12190         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12191         if (!dev)
12192                 return -ENOMEM;
12193
12194         bp = netdev_priv(dev);
12195         bnxt_set_max_func_irqs(bp, max_irqs);
12196
12197         if (bnxt_vf_pciid(ent->driver_data))
12198                 bp->flags |= BNXT_FLAG_VF;
12199
12200         if (pdev->msix_cap)
12201                 bp->flags |= BNXT_FLAG_MSIX_CAP;
12202
12203         rc = bnxt_init_board(pdev, dev);
12204         if (rc < 0)
12205                 goto init_err_free;
12206
12207         dev->netdev_ops = &bnxt_netdev_ops;
12208         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
12209         dev->ethtool_ops = &bnxt_ethtool_ops;
12210         pci_set_drvdata(pdev, dev);
12211
12212         if (BNXT_PF(bp))
12213                 bnxt_vpd_read_info(bp);
12214
12215         rc = bnxt_alloc_hwrm_resources(bp);
12216         if (rc)
12217                 goto init_err_pci_clean;
12218
12219         mutex_init(&bp->hwrm_cmd_lock);
12220         mutex_init(&bp->link_lock);
12221
12222         rc = bnxt_fw_init_one_p1(bp);
12223         if (rc)
12224                 goto init_err_pci_clean;
12225
12226         if (BNXT_CHIP_P5(bp))
12227                 bp->flags |= BNXT_FLAG_CHIP_P5;
12228
12229         rc = bnxt_alloc_rss_indir_tbl(bp);
12230         if (rc)
12231                 goto init_err_pci_clean;
12232
12233         rc = bnxt_fw_init_one_p2(bp);
12234         if (rc)
12235                 goto init_err_pci_clean;
12236
12237         rc = bnxt_map_db_bar(bp);
12238         if (rc) {
12239                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
12240                         rc);
12241                 goto init_err_pci_clean;
12242         }
12243
12244         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12245                            NETIF_F_TSO | NETIF_F_TSO6 |
12246                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12247                            NETIF_F_GSO_IPXIP4 |
12248                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12249                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
12250                            NETIF_F_RXCSUM | NETIF_F_GRO;
12251
12252         if (BNXT_SUPPORTS_TPA(bp))
12253                 dev->hw_features |= NETIF_F_LRO;
12254
12255         dev->hw_enc_features =
12256                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12257                         NETIF_F_TSO | NETIF_F_TSO6 |
12258                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12259                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12260                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
12261         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
12262
12263         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
12264                                     NETIF_F_GSO_GRE_CSUM;
12265         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
12266         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
12267                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12268         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
12269                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
12270         if (BNXT_SUPPORTS_TPA(bp))
12271                 dev->hw_features |= NETIF_F_GRO_HW;
12272         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
12273         if (dev->features & NETIF_F_GRO_HW)
12274                 dev->features &= ~NETIF_F_LRO;
12275         dev->priv_flags |= IFF_UNICAST_FLT;
12276
12277 #ifdef CONFIG_BNXT_SRIOV
12278         init_waitqueue_head(&bp->sriov_cfg_wait);
12279         mutex_init(&bp->sriov_lock);
12280 #endif
12281         if (BNXT_SUPPORTS_TPA(bp)) {
12282                 bp->gro_func = bnxt_gro_func_5730x;
12283                 if (BNXT_CHIP_P4(bp))
12284                         bp->gro_func = bnxt_gro_func_5731x;
12285                 else if (BNXT_CHIP_P5(bp))
12286                         bp->gro_func = bnxt_gro_func_5750x;
12287         }
12288         if (!BNXT_CHIP_P4_PLUS(bp))
12289                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
12290
12291         bp->ulp_probe = bnxt_ulp_probe;
12292
12293         rc = bnxt_init_mac_addr(bp);
12294         if (rc) {
12295                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
12296                 rc = -EADDRNOTAVAIL;
12297                 goto init_err_pci_clean;
12298         }
12299
12300         if (BNXT_PF(bp)) {
12301                 /* Read the adapter's DSN to use as the eswitch switch_id */
12302                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
12303         }
12304
12305         /* MTU range: 60 - FW defined max */
12306         dev->min_mtu = ETH_ZLEN;
12307         dev->max_mtu = bp->max_mtu;
12308
12309         rc = bnxt_probe_phy(bp, true);
12310         if (rc)
12311                 goto init_err_pci_clean;
12312
12313         bnxt_set_rx_skb_mode(bp, false);
12314         bnxt_set_tpa_flags(bp);
12315         bnxt_set_ring_params(bp);
12316         rc = bnxt_set_dflt_rings(bp, true);
12317         if (rc) {
12318                 netdev_err(bp->dev, "Not enough rings available.\n");
12319                 rc = -ENOMEM;
12320                 goto init_err_pci_clean;
12321         }
12322
12323         bnxt_fw_init_one_p3(bp);
12324
12325         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12326                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
12327
12328         rc = bnxt_init_int_mode(bp);
12329         if (rc)
12330                 goto init_err_pci_clean;
12331
12332         /* No TC has been set yet and rings may have been trimmed due to
12333          * limited MSIX, so we re-initialize the TX rings per TC.
12334          */
12335         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12336
12337         if (BNXT_PF(bp)) {
12338                 if (!bnxt_pf_wq) {
12339                         bnxt_pf_wq =
12340                                 create_singlethread_workqueue("bnxt_pf_wq");
12341                         if (!bnxt_pf_wq) {
12342                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                      rc = -ENOMEM;
12343                                 goto init_err_pci_clean;
12344                         }
12345                 }
12346                 rc = bnxt_init_tc(bp);
12347                 if (rc)
12348                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
12349                                    rc);
12350         }
12351
12352         bnxt_dl_register(bp);
12353
12354         rc = register_netdev(dev);
12355         if (rc)
12356                 goto init_err_cleanup;
12357
12358         if (BNXT_PF(bp))
12359                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
12360         bnxt_dl_fw_reporters_create(bp);
12361
12362         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
12363                     board_info[ent->driver_data].name,
12364                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
12365         pcie_print_link_status(pdev);
12366
12367         pci_save_state(pdev);
12368         return 0;
12369
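      /* Error labels fall through: each label also performs the cleanup of the labels below it. */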
12370 init_err_cleanup:
12371         bnxt_dl_unregister(bp);
12372         bnxt_shutdown_tc(bp);
12373         bnxt_clear_int_mode(bp);
12374
12375 init_err_pci_clean:
12376         bnxt_hwrm_func_drv_unrgtr(bp);
12377         bnxt_free_hwrm_short_cmd_req(bp);
12378         bnxt_free_hwrm_resources(bp);
12379         kfree(bp->fw_health);
12380         bp->fw_health = NULL;
12381         bnxt_cleanup_pci(bp);
12382         bnxt_free_ctx_mem(bp);
12383         kfree(bp->ctx);
12384         bp->ctx = NULL;
12385         kfree(bp->rss_indir_tbl);
12386         bp->rss_indir_tbl = NULL;
12387
12388 init_err_free:
12389         free_netdev(dev);
12390         return rc;
12391 }
12392
12393 static void bnxt_shutdown(struct pci_dev *pdev)
12394 {
12395         struct net_device *dev = pci_get_drvdata(pdev);
12396         struct bnxt *bp;
12397
12398         if (!dev)
12399                 return;
12400
12401         rtnl_lock();
12402         bp = netdev_priv(dev);
12403         if (!bp)
12404                 goto shutdown_exit;
12405
12406         if (netif_running(dev))
12407                 dev_close(dev);
12408
12409         bnxt_ulp_shutdown(bp);
12410         bnxt_clear_int_mode(bp);
12411         pci_disable_device(pdev);
12412
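              /* Arm wake-on-LAN and drop to D3hot only when the system is actually powering off. */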
12413         if (system_state == SYSTEM_POWER_OFF) {
12414                 pci_wake_from_d3(pdev, bp->wol);
12415                 pci_set_power_state(pdev, PCI_D3hot);
12416         }
12417
12418 shutdown_exit:
12419         rtnl_unlock();
12420 }
12421
12422 #ifdef CONFIG_PM_SLEEP
12423 static int bnxt_suspend(struct device *device)
12424 {
12425         struct net_device *dev = dev_get_drvdata(device);
12426         struct bnxt *bp = netdev_priv(dev);
12427         int rc = 0;
12428
12429         rtnl_lock();
12430         bnxt_ulp_stop(bp);
12431         if (netif_running(dev)) {
12432                 netif_device_detach(dev);
12433                 rc = bnxt_close(dev);
12434         }
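              /* Unregister the driver from the firmware and release context memory;
               * bnxt_resume() re-registers and re-queries the firmware.
               */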
12435         bnxt_hwrm_func_drv_unrgtr(bp);
12436         pci_disable_device(bp->pdev);
12437         bnxt_free_ctx_mem(bp);
12438         kfree(bp->ctx);
12439         bp->ctx = NULL;
12440         rtnl_unlock();
12441         return rc;
12442 }
12443
12444 static int bnxt_resume(struct device *device)
12445 {
12446         struct net_device *dev = dev_get_drvdata(device);
12447         struct bnxt *bp = netdev_priv(dev);
12448         int rc = 0;
12449
12450         rtnl_lock();
12451         rc = pci_enable_device(bp->pdev);
12452         if (rc) {
12453                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
12454                            rc);
12455                 goto resume_exit;
12456         }
12457         pci_set_master(bp->pdev);
12458         if (bnxt_hwrm_ver_get(bp)) {
12459                 rc = -ENODEV;
12460                 goto resume_exit;
12461         }
12462         rc = bnxt_hwrm_func_reset(bp);
12463         if (rc) {
12464                 rc = -EBUSY;
12465                 goto resume_exit;
12466         }
12467
12468         rc = bnxt_hwrm_func_qcaps(bp);
12469         if (rc)
12470                 goto resume_exit;
12471
12472         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12473                 rc = -ENODEV;
12474                 goto resume_exit;
12475         }
12476
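              /* Refresh wake-on-LAN settings from the firmware before reopening the device. */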
12477         bnxt_get_wol_settings(bp);
12478         if (netif_running(dev)) {
12479                 rc = bnxt_open(dev);
12480                 if (!rc)
12481                         netif_device_attach(dev);
12482         }
12483
12484 resume_exit:
12485         bnxt_ulp_start(bp, rc);
12486         if (!rc)
12487                 bnxt_reenable_sriov(bp);
12488         rtnl_unlock();
12489         return rc;
12490 }
12491
12492 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12493 #define BNXT_PM_OPS (&bnxt_pm_ops)
12494
12495 #else
12496
12497 #define BNXT_PM_OPS NULL
12498
12499 #endif /* CONFIG_PM_SLEEP */
12500
12501 /**
12502  * bnxt_io_error_detected - called when PCI error is detected
12503  * @pdev: Pointer to PCI device
12504  * @state: The current pci connection state
12505  *
12506  * This function is called after a PCI bus error affecting
12507  * this device has been detected.
12508  */
12509 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
12510                                                pci_channel_state_t state)
12511 {
12512         struct net_device *netdev = pci_get_drvdata(pdev);
12513         struct bnxt *bp = netdev_priv(netdev);
12514
12515         netdev_info(netdev, "PCI I/O error detected\n");
12516
12517         rtnl_lock();
12518         netif_device_detach(netdev);
12519
12520         bnxt_ulp_stop(bp);
12521
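              /* A permanent failure means the device is not coming back; tell the AER core to disconnect. */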
12522         if (state == pci_channel_io_perm_failure) {
12523                 rtnl_unlock();
12524                 return PCI_ERS_RESULT_DISCONNECT;
12525         }
12526
12527         if (netif_running(netdev))
12528                 bnxt_close(netdev);
12529
12530         pci_disable_device(pdev);
12531         bnxt_free_ctx_mem(bp);
12532         kfree(bp->ctx);
12533         bp->ctx = NULL;
12534         rtnl_unlock();
12535
12536         /* Request a slot reset. */
12537         return PCI_ERS_RESULT_NEED_RESET;
12538 }
12539
12540 /**
12541  * bnxt_io_slot_reset - called after the pci bus has been reset.
12542  * @pdev: Pointer to PCI device
12543  *
12544  * Restart the card from scratch, as if from a cold-boot.
12545          * At this point, the card has experienced a hard reset,
12546  * followed by fixups by BIOS, and has its config space
12547  * set up identically to what it was at cold boot.
12548  */
12549 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12550 {
12551         struct net_device *netdev = pci_get_drvdata(pdev);
12552         struct bnxt *bp = netdev_priv(netdev);
12553         int err = 0;
12554         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12555
12556         netdev_info(bp->dev, "PCI Slot Reset\n");
12557
12558         rtnl_lock();
12559
12560         if (pci_enable_device(pdev)) {
12561                 dev_err(&pdev->dev,
12562                         "Cannot re-enable PCI device after reset.\n");
12563         } else {
12564                 pci_set_master(pdev);
12565                 pci_restore_state(pdev);
12566                 pci_save_state(pdev);
12567
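                      /* Reset the firmware function and re-query capabilities before reopening the netdev. */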
12568                 err = bnxt_hwrm_func_reset(bp);
12569                 if (!err) {
12570                         err = bnxt_hwrm_func_qcaps(bp);
12571                         if (!err && netif_running(netdev))
12572                                 err = bnxt_open(netdev);
12573                 }
12574                 bnxt_ulp_start(bp, err);
12575                 if (!err) {
12576                         bnxt_reenable_sriov(bp);
12577                         result = PCI_ERS_RESULT_RECOVERED;
12578                 }
12579         }
12580
12581         if (result != PCI_ERS_RESULT_RECOVERED) {
12582                 if (netif_running(netdev))
12583                         dev_close(netdev);
12584                 pci_disable_device(pdev);
12585         }
12586
12587         rtnl_unlock();
12588
12589         return result;
12590 }
12591
12592 /**
12593  * bnxt_io_resume - called when traffic can start flowing again.
12594  * @pdev: Pointer to PCI device
12595  *
12596  * This callback is called when the error recovery driver tells
12597  * us that it's OK to resume normal operation.
12598  */
12599 static void bnxt_io_resume(struct pci_dev *pdev)
12600 {
12601         struct net_device *netdev = pci_get_drvdata(pdev);
12602
12603         rtnl_lock();
12604
12605         netif_device_attach(netdev);
12606
12607         rtnl_unlock();
12608 }
12609
12610 static const struct pci_error_handlers bnxt_err_handler = {
12611         .error_detected = bnxt_io_error_detected,
12612         .slot_reset     = bnxt_io_slot_reset,
12613         .resume         = bnxt_io_resume
12614 };
12615
12616 static struct pci_driver bnxt_pci_driver = {
12617         .name           = DRV_MODULE_NAME,
12618         .id_table       = bnxt_pci_tbl,
12619         .probe          = bnxt_init_one,
12620         .remove         = bnxt_remove_one,
12621         .shutdown       = bnxt_shutdown,
12622         .driver.pm      = BNXT_PM_OPS,
12623         .err_handler    = &bnxt_err_handler,
12624 #if defined(CONFIG_BNXT_SRIOV)
12625         .sriov_configure = bnxt_sriov_configure,
12626 #endif
12627 };
12628
12629 static int __init bnxt_init(void)
12630 {
12631         bnxt_debug_init();
12632         return pci_register_driver(&bnxt_pci_driver);
12633 }
12634
12635 static void __exit bnxt_exit(void)
12636 {
12637         pci_unregister_driver(&bnxt_pci_driver);
12638         if (bnxt_pf_wq)
12639                 destroy_workqueue(bnxt_pf_wq);
12640         bnxt_debug_exit();
12641 }
12642
12643 module_init(bnxt_init);
12644 module_exit(bnxt_exit);