drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/ptp_clock_kernel.h>
53 #include <linux/timecounter.h>
54 #include <linux/cpu_rmap.h>
55 #include <linux/cpumask.h>
56 #include <net/pkt_cls.h>
57 #include <linux/hwmon.h>
58 #include <linux/hwmon-sysfs.h>
59 #include <net/page_pool.h>
60
61 #include "bnxt_hsi.h"
62 #include "bnxt.h"
63 #include "bnxt_hwrm.h"
64 #include "bnxt_ulp.h"
65 #include "bnxt_sriov.h"
66 #include "bnxt_ethtool.h"
67 #include "bnxt_dcb.h"
68 #include "bnxt_xdp.h"
69 #include "bnxt_ptp.h"
70 #include "bnxt_vfr.h"
71 #include "bnxt_tc.h"
72 #include "bnxt_devlink.h"
73 #include "bnxt_debugfs.h"
74
75 #define BNXT_TX_TIMEOUT         (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
77                                  NETIF_MSG_TX_ERR)
78
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
81
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84 #define BNXT_RX_COPY_THRESH 256
85
86 #define BNXT_TX_PUSH_THRESH 164
87
88 enum board_idx {
89         BCM57301,
90         BCM57302,
91         BCM57304,
92         BCM57417_NPAR,
93         BCM58700,
94         BCM57311,
95         BCM57312,
96         BCM57402,
97         BCM57404,
98         BCM57406,
99         BCM57402_NPAR,
100         BCM57407,
101         BCM57412,
102         BCM57414,
103         BCM57416,
104         BCM57417,
105         BCM57412_NPAR,
106         BCM57314,
107         BCM57417_SFP,
108         BCM57416_SFP,
109         BCM57404_NPAR,
110         BCM57406_NPAR,
111         BCM57407_SFP,
112         BCM57407_NPAR,
113         BCM57414_NPAR,
114         BCM57416_NPAR,
115         BCM57452,
116         BCM57454,
117         BCM5745x_NPAR,
118         BCM57508,
119         BCM57504,
120         BCM57502,
121         BCM57508_NPAR,
122         BCM57504_NPAR,
123         BCM57502_NPAR,
124         BCM58802,
125         BCM58804,
126         BCM58808,
127         NETXTREME_E_VF,
128         NETXTREME_C_VF,
129         NETXTREME_S_VF,
130         NETXTREME_C_VF_HV,
131         NETXTREME_E_VF_HV,
132         NETXTREME_E_P5_VF,
133         NETXTREME_E_P5_VF_HV,
134 };
135
136 /* indexed by enum above */
137 static const struct {
138         char *name;
139 } board_info[] = {
140         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
141         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
142         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
143         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
144         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
145         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
146         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
147         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
148         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
149         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
150         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
151         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
152         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
153         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
154         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
155         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
156         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
157         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
158         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
159         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
160         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
161         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
162         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
163         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
164         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
165         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
166         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
167         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
168         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
169         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
170         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
171         [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
172         [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
173         [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
174         [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
175         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
176         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
177         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
178         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
179         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
180         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
181         [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
182         [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
183         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
184         [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
185 };
186
187 static const struct pci_device_id bnxt_pci_tbl[] = {
188         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
189         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
190         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
191         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
192         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
193         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
194         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
195         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
196         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
197         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
198         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
199         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
200         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
201         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
202         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
203         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
204         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
205         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
206         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
207         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
208         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
209         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
210         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
211         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
212         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
213         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
214         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
215         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
216         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
217         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
218         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
219         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
220         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
221         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
222         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
223         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
224         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
225         { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
226         { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
227         { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
228         { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
229         { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
230         { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
231         { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
232         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
233         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
234 #ifdef CONFIG_BNXT_SRIOV
235         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
236         { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
237         { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
238         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
239         { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
240         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
241         { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
242         { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
243         { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
244         { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
245         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
246         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
247         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
248         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
249         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
250         { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
251         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
252         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
253         { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
254         { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
255         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
256 #endif
257         { 0 }
258 };
259
260 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
261
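/* HWRM commands from VFs that the PF driver asks the firmware to forward
 * so it can inspect them before they are executed.
 */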
262 static const u16 bnxt_vf_req_snif[] = {
263         HWRM_FUNC_CFG,
264         HWRM_FUNC_VF_CFG,
265         HWRM_PORT_PHY_QCFG,
266         HWRM_CFA_L2_FILTER_ALLOC,
267 };
268
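/* Firmware asynchronous event IDs that the driver registers to receive. */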
269 static const u16 bnxt_async_events_arr[] = {
270         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
271         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
272         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
273         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
274         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
275         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
276         ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
277         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
278         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
279         ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
280         ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
281         ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
282         ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
283         ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
284         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
285 };
286
287 static struct workqueue_struct *bnxt_pf_wq;
288
289 static bool bnxt_vf_pciid(enum board_idx idx)
290 {
291         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
292                 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
293                 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
294                 idx == NETXTREME_E_P5_VF_HV);
295 }
296
297 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
298 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
299 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
300
301 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
302                 writel(DB_CP_IRQ_DIS_FLAGS, db)
303
304 #define BNXT_DB_CQ(db, idx)                                             \
305         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
306
307 #define BNXT_DB_NQ_P5(db, idx)                                          \
308         bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),   \
309                     (db)->doorbell)
310
311 #define BNXT_DB_CQ_ARM(db, idx)                                         \
312         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
313
314 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
315         bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
316                     (db)->doorbell)
317
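/* Write the NQ doorbell on P5 chips, or the legacy CQ doorbell otherwise. */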
318 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
319 {
320         if (bp->flags & BNXT_FLAG_CHIP_P5)
321                 BNXT_DB_NQ_P5(db, idx);
322         else
323                 BNXT_DB_CQ(db, idx);
324 }
325
326 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
327 {
328         if (bp->flags & BNXT_FLAG_CHIP_P5)
329                 BNXT_DB_NQ_ARM_P5(db, idx);
330         else
331                 BNXT_DB_CQ_ARM(db, idx);
332 }
333
334 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
335 {
336         if (bp->flags & BNXT_FLAG_CHIP_P5)
337                 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
338                             RING_CMP(idx), db->doorbell);
339         else
340                 BNXT_DB_CQ(db, idx);
341 }
342
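/* TX BD length hint, indexed by the packet length in 512-byte units. */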
343 const u16 bnxt_lhint_arr[] = {
344         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
345         TX_BD_FLAGS_LHINT_512_TO_1023,
346         TX_BD_FLAGS_LHINT_1024_TO_2047,
347         TX_BD_FLAGS_LHINT_1024_TO_2047,
348         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
354         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
355         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
360         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
361         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
362         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
363 };
364
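/* Return the CFA action (target switch port) carried in the skb's
 * HW port mux metadata dst, or 0 if there is none.
 */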
365 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
366 {
367         struct metadata_dst *md_dst = skb_metadata_dst(skb);
368
369         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
370                 return 0;
371
372         return md_dst->u.port_info.port_id;
373 }
374
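/* Ring the TX doorbell with the new producer index and clear any
 * deferred kick.
 */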
375 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
376                              u16 prod)
377 {
378         bnxt_db_write(bp, &txr->tx_db, prod);
379         txr->kick_pending = 0;
380 }
381
382 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
383                                           struct bnxt_tx_ring_info *txr,
384                                           struct netdev_queue *txq)
385 {
386         netif_tx_stop_queue(txq);
387
388         /* netif_tx_stop_queue() must be done before checking
389          * tx index in bnxt_tx_avail() below, because in
390          * bnxt_tx_int(), we update tx index before checking for
391          * netif_tx_queue_stopped().
392          */
393         smp_mb();
394         if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
395                 netif_tx_wake_queue(txq);
396                 return false;
397         }
398
399         return true;
400 }
401
402 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
403 {
404         struct bnxt *bp = netdev_priv(dev);
405         struct tx_bd *txbd;
406         struct tx_bd_ext *txbd1;
407         struct netdev_queue *txq;
408         int i;
409         dma_addr_t mapping;
410         unsigned int length, pad = 0;
411         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
412         u16 prod, last_frag;
413         struct pci_dev *pdev = bp->pdev;
414         struct bnxt_tx_ring_info *txr;
415         struct bnxt_sw_tx_bd *tx_buf;
416         __le32 lflags = 0;
417
418         i = skb_get_queue_mapping(skb);
419         if (unlikely(i >= bp->tx_nr_rings)) {
420                 dev_kfree_skb_any(skb);
421                 atomic_long_inc(&dev->tx_dropped);
422                 return NETDEV_TX_OK;
423         }
424
425         txq = netdev_get_tx_queue(dev, i);
426         txr = &bp->tx_ring[bp->tx_ring_map[i]];
427         prod = txr->tx_prod;
428
429         free_size = bnxt_tx_avail(bp, txr);
430         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
431                 /* We must have raced with NAPI cleanup */
432                 if (net_ratelimit() && txr->kick_pending)
433                         netif_warn(bp, tx_err, dev,
434                                    "bnxt: ring busy w/ flush pending!\n");
435                 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
436                         return NETDEV_TX_BUSY;
437         }
438
439         length = skb->len;
440         len = skb_headlen(skb);
441         last_frag = skb_shinfo(skb)->nr_frags;
442
443         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
444
445         txbd->tx_bd_opaque = prod;
446
447         tx_buf = &txr->tx_buf_ring[prod];
448         tx_buf->skb = skb;
449         tx_buf->nr_frags = last_frag;
450
451         vlan_tag_flags = 0;
452         cfa_action = bnxt_xmit_get_cfa_action(skb);
453         if (skb_vlan_tag_present(skb)) {
454                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
455                                  skb_vlan_tag_get(skb);
456                 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
457                  * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
458                  */
459                 if (skb->vlan_proto == htons(ETH_P_8021Q))
460                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
461         }
462
463         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
464                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
465
466                 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
467                     atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
468                         if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
469                                             &ptp->tx_hdr_off)) {
470                                 if (vlan_tag_flags)
471                                         ptp->tx_hdr_off += VLAN_HLEN;
472                                 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
473                                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
474                         } else {
475                                 atomic_inc(&bp->ptp_cfg->tx_avail);
476                         }
477                 }
478         }
479
480         if (unlikely(skb->no_fcs))
481                 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
482
483         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
484             !lflags) {
485                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
486                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
487                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
488                 void __iomem *db = txr->tx_db.doorbell;
489                 void *pdata = tx_push_buf->data;
490                 u64 *end;
491                 int j, push_len;
492
493                 /* Set COAL_NOW to be ready quickly for the next push */
494                 tx_push->tx_bd_len_flags_type =
495                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
496                                         TX_BD_TYPE_LONG_TX_BD |
497                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
498                                         TX_BD_FLAGS_COAL_NOW |
499                                         TX_BD_FLAGS_PACKET_END |
500                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
501
502                 if (skb->ip_summed == CHECKSUM_PARTIAL)
503                         tx_push1->tx_bd_hsize_lflags =
504                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
505                 else
506                         tx_push1->tx_bd_hsize_lflags = 0;
507
508                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
509                 tx_push1->tx_bd_cfa_action =
510                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
511
512                 end = pdata + length;
513                 end = PTR_ALIGN(end, 8) - 1;
514                 *end = 0;
515
516                 skb_copy_from_linear_data(skb, pdata, len);
517                 pdata += len;
518                 for (j = 0; j < last_frag; j++) {
519                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
520                         void *fptr;
521
522                         fptr = skb_frag_address_safe(frag);
523                         if (!fptr)
524                                 goto normal_tx;
525
526                         memcpy(pdata, fptr, skb_frag_size(frag));
527                         pdata += skb_frag_size(frag);
528                 }
529
530                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
531                 txbd->tx_bd_haddr = txr->data_mapping;
532                 prod = NEXT_TX(prod);
533                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
534                 memcpy(txbd, tx_push1, sizeof(*txbd));
535                 prod = NEXT_TX(prod);
536                 tx_push->doorbell =
537                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
538                 txr->tx_prod = prod;
539
540                 tx_buf->is_push = 1;
541                 netdev_tx_sent_queue(txq, skb->len);
542                 wmb();  /* Sync is_push and byte queue before pushing data */
543
544                 push_len = (length + sizeof(*tx_push) + 7) / 8;
545                 if (push_len > 16) {
546                         __iowrite64_copy(db, tx_push_buf, 16);
547                         __iowrite32_copy(db + 4, tx_push_buf + 1,
548                                          (push_len - 16) << 1);
549                 } else {
550                         __iowrite64_copy(db, tx_push_buf, push_len);
551                 }
552
553                 goto tx_done;
554         }
555
556 normal_tx:
557         if (length < BNXT_MIN_PKT_SIZE) {
558                 pad = BNXT_MIN_PKT_SIZE - length;
559                 if (skb_pad(skb, pad))
560                         /* SKB already freed. */
561                         goto tx_kick_pending;
562                 length = BNXT_MIN_PKT_SIZE;
563         }
564
565         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
566
567         if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
568                 goto tx_free;
569
570         dma_unmap_addr_set(tx_buf, mapping, mapping);
571         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
572                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
573
574         txbd->tx_bd_haddr = cpu_to_le64(mapping);
575
576         prod = NEXT_TX(prod);
577         txbd1 = (struct tx_bd_ext *)
578                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
579
580         txbd1->tx_bd_hsize_lflags = lflags;
581         if (skb_is_gso(skb)) {
582                 u32 hdr_len;
583
584                 if (skb->encapsulation)
585                         hdr_len = skb_inner_network_offset(skb) +
586                                 skb_inner_network_header_len(skb) +
587                                 inner_tcp_hdrlen(skb);
588                 else
589                         hdr_len = skb_transport_offset(skb) +
590                                 tcp_hdrlen(skb);
591
592                 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
593                                         TX_BD_FLAGS_T_IPID |
594                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
595                 length = skb_shinfo(skb)->gso_size;
596                 txbd1->tx_bd_mss = cpu_to_le32(length);
597                 length += hdr_len;
598         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
599                 txbd1->tx_bd_hsize_lflags |=
600                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
601                 txbd1->tx_bd_mss = 0;
602         }
603
604         length >>= 9;
605         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
606                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
607                                      skb->len);
608                 i = 0;
609                 goto tx_dma_error;
610         }
611         flags |= bnxt_lhint_arr[length];
612         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
613
614         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
615         txbd1->tx_bd_cfa_action =
616                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
617         for (i = 0; i < last_frag; i++) {
618                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
619
620                 prod = NEXT_TX(prod);
621                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
622
623                 len = skb_frag_size(frag);
624                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
625                                            DMA_TO_DEVICE);
626
627                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
628                         goto tx_dma_error;
629
630                 tx_buf = &txr->tx_buf_ring[prod];
631                 dma_unmap_addr_set(tx_buf, mapping, mapping);
632
633                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
634
635                 flags = len << TX_BD_LEN_SHIFT;
636                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
637         }
638
639         flags &= ~TX_BD_LEN;
640         txbd->tx_bd_len_flags_type =
641                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
642                             TX_BD_FLAGS_PACKET_END);
643
644         netdev_tx_sent_queue(txq, skb->len);
645
646         skb_tx_timestamp(skb);
647
648         /* Sync BD data before updating doorbell */
649         wmb();
650
651         prod = NEXT_TX(prod);
652         txr->tx_prod = prod;
653
654         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
655                 bnxt_txr_db_kick(bp, txr, prod);
656         else
657                 txr->kick_pending = 1;
658
659 tx_done:
660
661         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
662                 if (netdev_xmit_more() && !tx_buf->is_push)
663                         bnxt_txr_db_kick(bp, txr, prod);
664
665                 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
666         }
667         return NETDEV_TX_OK;
668
669 tx_dma_error:
670         if (BNXT_TX_PTP_IS_SET(lflags))
671                 atomic_inc(&bp->ptp_cfg->tx_avail);
672
673         last_frag = i;
674
675         /* start back at beginning and unmap skb */
676         prod = txr->tx_prod;
677         tx_buf = &txr->tx_buf_ring[prod];
678         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
679                          skb_headlen(skb), DMA_TO_DEVICE);
680         prod = NEXT_TX(prod);
681
682         /* unmap remaining mapped pages */
683         for (i = 0; i < last_frag; i++) {
684                 prod = NEXT_TX(prod);
685                 tx_buf = &txr->tx_buf_ring[prod];
686                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
687                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
688                                DMA_TO_DEVICE);
689         }
690
691 tx_free:
692         dev_kfree_skb_any(skb);
693 tx_kick_pending:
694         if (txr->kick_pending)
695                 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
696         txr->tx_buf_ring[txr->tx_prod].skb = NULL;
697         atomic_long_inc(&dev->tx_dropped);
698         return NETDEV_TX_OK;
699 }
700
701 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
702 {
703         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
704         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
705         u16 cons = txr->tx_cons;
706         struct pci_dev *pdev = bp->pdev;
707         int i;
708         unsigned int tx_bytes = 0;
709
710         for (i = 0; i < nr_pkts; i++) {
711                 struct bnxt_sw_tx_bd *tx_buf;
712                 bool compl_deferred = false;
713                 struct sk_buff *skb;
714                 int j, last;
715
716                 tx_buf = &txr->tx_buf_ring[cons];
717                 cons = NEXT_TX(cons);
718                 skb = tx_buf->skb;
719                 tx_buf->skb = NULL;
720
721                 if (tx_buf->is_push) {
722                         tx_buf->is_push = 0;
723                         goto next_tx_int;
724                 }
725
726                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
727                                  skb_headlen(skb), DMA_TO_DEVICE);
728                 last = tx_buf->nr_frags;
729
730                 for (j = 0; j < last; j++) {
731                         cons = NEXT_TX(cons);
732                         tx_buf = &txr->tx_buf_ring[cons];
733                         dma_unmap_page(
734                                 &pdev->dev,
735                                 dma_unmap_addr(tx_buf, mapping),
736                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
737                                 DMA_TO_DEVICE);
738                 }
739                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
740                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
741                                 if (!bnxt_get_tx_ts_p5(bp, skb))
742                                         compl_deferred = true;
743                                 else
744                                         atomic_inc(&bp->ptp_cfg->tx_avail);
745                         }
746                 }
747
748 next_tx_int:
749                 cons = NEXT_TX(cons);
750
751                 tx_bytes += skb->len;
752                 if (!compl_deferred)
753                         dev_kfree_skb_any(skb);
754         }
755
756         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
757         txr->tx_cons = cons;
758
759         /* Need to make the tx_cons update visible to bnxt_start_xmit()
760          * before checking for netif_tx_queue_stopped().  Without the
761          * memory barrier, there is a small possibility that bnxt_start_xmit()
762          * will miss it and cause the queue to be stopped forever.
763          */
764         smp_mb();
765
766         if (unlikely(netif_tx_queue_stopped(txq)) &&
767             bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
768             READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
769                 netif_tx_wake_queue(txq);
770 }
771
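/* Allocate an rx page from the ring's page pool and DMA-map it for the NIC. */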
772 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
773                                          struct bnxt_rx_ring_info *rxr,
774                                          gfp_t gfp)
775 {
776         struct device *dev = &bp->pdev->dev;
777         struct page *page;
778
779         page = page_pool_dev_alloc_pages(rxr->page_pool);
780         if (!page)
781                 return NULL;
782
783         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
784                                       DMA_ATTR_WEAK_ORDERING);
785         if (dma_mapping_error(dev, *mapping)) {
786                 page_pool_recycle_direct(rxr->page_pool, page);
787                 return NULL;
788         }
789         *mapping += bp->rx_dma_offset;
790         return page;
791 }
792
793 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
794                                        gfp_t gfp)
795 {
796         u8 *data;
797         struct pci_dev *pdev = bp->pdev;
798
799         data = kmalloc(bp->rx_buf_size, gfp);
800         if (!data)
801                 return NULL;
802
803         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
804                                         bp->rx_buf_use_size, bp->rx_dir,
805                                         DMA_ATTR_WEAK_ORDERING);
806
807         if (dma_mapping_error(&pdev->dev, *mapping)) {
808                 kfree(data);
809                 data = NULL;
810         }
811         return data;
812 }
813
814 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
815                        u16 prod, gfp_t gfp)
816 {
817         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
818         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
819         dma_addr_t mapping;
820
821         if (BNXT_RX_PAGE_MODE(bp)) {
822                 struct page *page =
823                         __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
824
825                 if (!page)
826                         return -ENOMEM;
827
828                 rx_buf->data = page;
829                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
830         } else {
831                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
832
833                 if (!data)
834                         return -ENOMEM;
835
836                 rx_buf->data = data;
837                 rx_buf->data_ptr = data + bp->rx_offset;
838         }
839         rx_buf->mapping = mapping;
840
841         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
842         return 0;
843 }
844
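/* Recycle the rx buffer at cons into the current producer slot so it can be
 * handed back to the NIC.
 */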
845 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
846 {
847         u16 prod = rxr->rx_prod;
848         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
849         struct rx_bd *cons_bd, *prod_bd;
850
851         prod_rx_buf = &rxr->rx_buf_ring[prod];
852         cons_rx_buf = &rxr->rx_buf_ring[cons];
853
854         prod_rx_buf->data = data;
855         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
856
857         prod_rx_buf->mapping = cons_rx_buf->mapping;
858
859         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
860         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
861
862         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
863 }
864
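/* Find the next free slot in the aggregation ring bitmap, wrapping to the
 * start if needed.
 */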
865 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
866 {
867         u16 next, max = rxr->rx_agg_bmap_size;
868
869         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
870         if (next >= max)
871                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
872         return next;
873 }
874
875 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
876                                      struct bnxt_rx_ring_info *rxr,
877                                      u16 prod, gfp_t gfp)
878 {
879         struct rx_bd *rxbd =
880                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
881         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
882         struct pci_dev *pdev = bp->pdev;
883         struct page *page;
884         dma_addr_t mapping;
885         u16 sw_prod = rxr->rx_sw_agg_prod;
886         unsigned int offset = 0;
887
888         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
889                 page = rxr->rx_page;
890                 if (!page) {
891                         page = alloc_page(gfp);
892                         if (!page)
893                                 return -ENOMEM;
894                         rxr->rx_page = page;
895                         rxr->rx_page_offset = 0;
896                 }
897                 offset = rxr->rx_page_offset;
898                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
899                 if (rxr->rx_page_offset == PAGE_SIZE)
900                         rxr->rx_page = NULL;
901                 else
902                         get_page(page);
903         } else {
904                 page = alloc_page(gfp);
905                 if (!page)
906                         return -ENOMEM;
907         }
908
909         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
910                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
911                                      DMA_ATTR_WEAK_ORDERING);
912         if (dma_mapping_error(&pdev->dev, mapping)) {
913                 __free_page(page);
914                 return -EIO;
915         }
916
917         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
918                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
919
920         __set_bit(sw_prod, rxr->rx_agg_bmap);
921         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
922         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
923
924         rx_agg_buf->page = page;
925         rx_agg_buf->offset = offset;
926         rx_agg_buf->mapping = mapping;
927         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
928         rxbd->rx_bd_opaque = sw_prod;
929         return 0;
930 }
931
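/* Return the aggregation completion entry 'curr' positions past cp_cons on
 * the completion ring.
 */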
932 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
933                                        struct bnxt_cp_ring_info *cpr,
934                                        u16 cp_cons, u16 curr)
935 {
936         struct rx_agg_cmp *agg;
937
938         cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
939         agg = (struct rx_agg_cmp *)
940                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
941         return agg;
942 }
943
944 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
945                                               struct bnxt_rx_ring_info *rxr,
946                                               u16 agg_id, u16 curr)
947 {
948         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
949
950         return &tpa_info->agg_arr[curr];
951 }
952
953 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
954                                    u16 start, u32 agg_bufs, bool tpa)
955 {
956         struct bnxt_napi *bnapi = cpr->bnapi;
957         struct bnxt *bp = bnapi->bp;
958         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
959         u16 prod = rxr->rx_agg_prod;
960         u16 sw_prod = rxr->rx_sw_agg_prod;
961         bool p5_tpa = false;
962         u32 i;
963
964         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
965                 p5_tpa = true;
966
967         for (i = 0; i < agg_bufs; i++) {
968                 u16 cons;
969                 struct rx_agg_cmp *agg;
970                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
971                 struct rx_bd *prod_bd;
972                 struct page *page;
973
974                 if (p5_tpa)
975                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
976                 else
977                         agg = bnxt_get_agg(bp, cpr, idx, start + i);
978                 cons = agg->rx_agg_cmp_opaque;
979                 __clear_bit(cons, rxr->rx_agg_bmap);
980
981                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
982                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
983
984                 __set_bit(sw_prod, rxr->rx_agg_bmap);
985                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
986                 cons_rx_buf = &rxr->rx_agg_ring[cons];
987
988                 /* It is possible for sw_prod to be equal to cons, so
989                  * set cons_rx_buf->page to NULL first.
990                  */
991                 page = cons_rx_buf->page;
992                 cons_rx_buf->page = NULL;
993                 prod_rx_buf->page = page;
994                 prod_rx_buf->offset = cons_rx_buf->offset;
995
996                 prod_rx_buf->mapping = cons_rx_buf->mapping;
997
998                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
999
1000                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1001                 prod_bd->rx_bd_opaque = sw_prod;
1002
1003                 prod = NEXT_RX_AGG(prod);
1004                 sw_prod = NEXT_RX_AGG(sw_prod);
1005         }
1006         rxr->rx_agg_prod = prod;
1007         rxr->rx_sw_agg_prod = sw_prod;
1008 }
1009
1010 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1011                                         struct bnxt_rx_ring_info *rxr,
1012                                         u16 cons, void *data, u8 *data_ptr,
1013                                         dma_addr_t dma_addr,
1014                                         unsigned int offset_and_len)
1015 {
1016         unsigned int payload = offset_and_len >> 16;
1017         unsigned int len = offset_and_len & 0xffff;
1018         skb_frag_t *frag;
1019         struct page *page = data;
1020         u16 prod = rxr->rx_prod;
1021         struct sk_buff *skb;
1022         int off, err;
1023
1024         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1025         if (unlikely(err)) {
1026                 bnxt_reuse_rx_data(rxr, cons, data);
1027                 return NULL;
1028         }
1029         dma_addr -= bp->rx_dma_offset;
1030         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1031                              DMA_ATTR_WEAK_ORDERING);
1032         page_pool_release_page(rxr->page_pool, page);
1033
1034         if (unlikely(!payload))
1035                 payload = eth_get_headlen(bp->dev, data_ptr, len);
1036
1037         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1038         if (!skb) {
1039                 __free_page(page);
1040                 return NULL;
1041         }
1042
1043         off = (void *)data_ptr - page_address(page);
1044         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1045         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1046                payload + NET_IP_ALIGN);
1047
1048         frag = &skb_shinfo(skb)->frags[0];
1049         skb_frag_size_sub(frag, payload);
1050         skb_frag_off_add(frag, payload);
1051         skb->data_len -= payload;
1052         skb->tail += payload;
1053
1054         return skb;
1055 }
1056
1057 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1058                                    struct bnxt_rx_ring_info *rxr, u16 cons,
1059                                    void *data, u8 *data_ptr,
1060                                    dma_addr_t dma_addr,
1061                                    unsigned int offset_and_len)
1062 {
1063         u16 prod = rxr->rx_prod;
1064         struct sk_buff *skb;
1065         int err;
1066
1067         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1068         if (unlikely(err)) {
1069                 bnxt_reuse_rx_data(rxr, cons, data);
1070                 return NULL;
1071         }
1072
1073         skb = build_skb(data, 0);
1074         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1075                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1076         if (!skb) {
1077                 kfree(data);
1078                 return NULL;
1079         }
1080
1081         skb_reserve(skb, bp->rx_offset);
1082         skb_put(skb, offset_and_len & 0xffff);
1083         return skb;
1084 }
1085
1086 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1087                                      struct bnxt_cp_ring_info *cpr,
1088                                      struct sk_buff *skb, u16 idx,
1089                                      u32 agg_bufs, bool tpa)
1090 {
1091         struct bnxt_napi *bnapi = cpr->bnapi;
1092         struct pci_dev *pdev = bp->pdev;
1093         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1094         u16 prod = rxr->rx_agg_prod;
1095         bool p5_tpa = false;
1096         u32 i;
1097
1098         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1099                 p5_tpa = true;
1100
1101         for (i = 0; i < agg_bufs; i++) {
1102                 u16 cons, frag_len;
1103                 struct rx_agg_cmp *agg;
1104                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1105                 struct page *page;
1106                 dma_addr_t mapping;
1107
1108                 if (p5_tpa)
1109                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1110                 else
1111                         agg = bnxt_get_agg(bp, cpr, idx, i);
1112                 cons = agg->rx_agg_cmp_opaque;
1113                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1114                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1115
1116                 cons_rx_buf = &rxr->rx_agg_ring[cons];
1117                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1118                                    cons_rx_buf->offset, frag_len);
1119                 __clear_bit(cons, rxr->rx_agg_bmap);
1120
1121                 /* It is possible for bnxt_alloc_rx_page() to allocate
1122                  * a sw_prod index that equals the cons index, so we
1123                  * need to clear the cons entry now.
1124                  */
1125                 mapping = cons_rx_buf->mapping;
1126                 page = cons_rx_buf->page;
1127                 cons_rx_buf->page = NULL;
1128
1129                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1130                         struct skb_shared_info *shinfo;
1131                         unsigned int nr_frags;
1132
1133                         shinfo = skb_shinfo(skb);
1134                         nr_frags = --shinfo->nr_frags;
1135                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1136
1137                         dev_kfree_skb(skb);
1138
1139                         cons_rx_buf->page = page;
1140
1141                         /* Update prod since possibly some pages have been
1142                          * allocated already.
1143                          */
1144                         rxr->rx_agg_prod = prod;
1145                         bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1146                         return NULL;
1147                 }
1148
1149                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1150                                      DMA_FROM_DEVICE,
1151                                      DMA_ATTR_WEAK_ORDERING);
1152
1153                 skb->data_len += frag_len;
1154                 skb->len += frag_len;
1155                 skb->truesize += PAGE_SIZE;
1156
1157                 prod = NEXT_RX_AGG(prod);
1158         }
1159         rxr->rx_agg_prod = prod;
1160         return skb;
1161 }
1162
1163 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1164                                u8 agg_bufs, u32 *raw_cons)
1165 {
1166         u16 last;
1167         struct rx_agg_cmp *agg;
1168
1169         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1170         last = RING_CMP(*raw_cons);
1171         agg = (struct rx_agg_cmp *)
1172                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1173         return RX_AGG_CMP_VALID(agg, *raw_cons);
1174 }
1175
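/* Copy a small received packet into a freshly allocated skb, leaving the
 * original rx buffer in place on the ring.
 */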
1176 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1177                                             unsigned int len,
1178                                             dma_addr_t mapping)
1179 {
1180         struct bnxt *bp = bnapi->bp;
1181         struct pci_dev *pdev = bp->pdev;
1182         struct sk_buff *skb;
1183
1184         skb = napi_alloc_skb(&bnapi->napi, len);
1185         if (!skb)
1186                 return NULL;
1187
1188         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1189                                 bp->rx_dir);
1190
1191         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1192                len + NET_IP_ALIGN);
1193
1194         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1195                                    bp->rx_dir);
1196
1197         skb_put(skb, len);
1198         return skb;
1199 }
1200
1201 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1202                            u32 *raw_cons, void *cmp)
1203 {
1204         struct rx_cmp *rxcmp = cmp;
1205         u32 tmp_raw_cons = *raw_cons;
1206         u8 cmp_type, agg_bufs = 0;
1207
1208         cmp_type = RX_CMP_TYPE(rxcmp);
1209
1210         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1211                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1212                             RX_CMP_AGG_BUFS) >>
1213                            RX_CMP_AGG_BUFS_SHIFT;
1214         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1215                 struct rx_tpa_end_cmp *tpa_end = cmp;
1216
1217                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1218                         return 0;
1219
1220                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1221         }
1222
1223         if (agg_bufs) {
1224                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1225                         return -EBUSY;
1226         }
1227         *raw_cons = tmp_raw_cons;
1228         return 0;
1229 }
1230
1231 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1232 {
1233         if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1234                 return;
1235
1236         if (BNXT_PF(bp))
1237                 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1238         else
1239                 schedule_delayed_work(&bp->fw_reset_task, delay);
1240 }
1241
1242 static void bnxt_queue_sp_work(struct bnxt *bp)
1243 {
1244         if (BNXT_PF(bp))
1245                 queue_work(bnxt_pf_wq, &bp->sp_task);
1246         else
1247                 schedule_work(&bp->sp_task);
1248 }
1249
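/* Schedule a reset via the slow-path task: a full reset on P5 chips, a ring
 * reset otherwise.  The NAPI is marked in_reset until the reset completes.
 */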
1250 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1251 {
1252         if (!rxr->bnapi->in_reset) {
1253                 rxr->bnapi->in_reset = true;
1254                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1255                         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1256                 else
1257                         set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1258                 bnxt_queue_sp_work(bp);
1259         }
1260         rxr->rx_next_cons = 0xffff;
1261 }
1262
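/* Map a hardware TPA agg_id to a free driver-side index and record the
 * mapping for later lookup.
 */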
1263 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1264 {
1265         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1266         u16 idx = agg_id & MAX_TPA_P5_MASK;
1267
1268         if (test_bit(idx, map->agg_idx_bmap))
1269                 idx = find_first_zero_bit(map->agg_idx_bmap,
1270                                           BNXT_AGG_IDX_BMAP_SIZE);
1271         __set_bit(idx, map->agg_idx_bmap);
1272         map->agg_id_tbl[agg_id] = idx;
1273         return idx;
1274 }
1275
1276 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1277 {
1278         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1279
1280         __clear_bit(idx, map->agg_idx_bmap);
1281 }
1282
1283 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1284 {
1285         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1286
1287         return map->agg_id_tbl[agg_id];
1288 }
1289
1290 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1291                            struct rx_tpa_start_cmp *tpa_start,
1292                            struct rx_tpa_start_cmp_ext *tpa_start1)
1293 {
1294         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1295         struct bnxt_tpa_info *tpa_info;
1296         u16 cons, prod, agg_id;
1297         struct rx_bd *prod_bd;
1298         dma_addr_t mapping;
1299
1300         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1301                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1302                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1303         } else {
1304                 agg_id = TPA_START_AGG_ID(tpa_start);
1305         }
1306         cons = tpa_start->rx_tpa_start_cmp_opaque;
1307         prod = rxr->rx_prod;
1308         cons_rx_buf = &rxr->rx_buf_ring[cons];
1309         prod_rx_buf = &rxr->rx_buf_ring[prod];
1310         tpa_info = &rxr->rx_tpa[agg_id];
1311
1312         if (unlikely(cons != rxr->rx_next_cons ||
1313                      TPA_START_ERROR(tpa_start))) {
1314                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1315                             cons, rxr->rx_next_cons,
1316                             TPA_START_ERROR_CODE(tpa_start1));
1317                 bnxt_sched_reset(bp, rxr);
1318                 return;
1319         }
1320         /* Store cfa_code in tpa_info to use in tpa_end
1321          * completion processing.
1322          */
1323         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1324         prod_rx_buf->data = tpa_info->data;
1325         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1326
1327         mapping = tpa_info->mapping;
1328         prod_rx_buf->mapping = mapping;
1329
1330         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1331
1332         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1333
1334         tpa_info->data = cons_rx_buf->data;
1335         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1336         cons_rx_buf->data = NULL;
1337         tpa_info->mapping = cons_rx_buf->mapping;
1338
1339         tpa_info->len =
1340                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1341                                 RX_TPA_START_CMP_LEN_SHIFT;
1342         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1343                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1344
1345                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1346                 tpa_info->gso_type = SKB_GSO_TCPV4;
1347                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1348                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1349                         tpa_info->gso_type = SKB_GSO_TCPV6;
1350                 tpa_info->rss_hash =
1351                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1352         } else {
1353                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1354                 tpa_info->gso_type = 0;
1355                 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1356         }
1357         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1358         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1359         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1360         tpa_info->agg_count = 0;
1361
1362         rxr->rx_prod = NEXT_RX(prod);
1363         cons = NEXT_RX(cons);
1364         rxr->rx_next_cons = NEXT_RX(cons);
1365         cons_rx_buf = &rxr->rx_buf_ring[cons];
1366
1367         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1368         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1369         cons_rx_buf->data = NULL;
1370 }
1371
1372 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1373 {
1374         if (agg_bufs)
1375                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1376 }
1377
1378 #ifdef CONFIG_INET
1379 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1380 {
1381         struct udphdr *uh = NULL;
1382
1383         if (ip_proto == htons(ETH_P_IP)) {
1384                 struct iphdr *iph = (struct iphdr *)skb->data;
1385
1386                 if (iph->protocol == IPPROTO_UDP)
1387                         uh = (struct udphdr *)(iph + 1);
1388         } else {
1389                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1390
1391                 if (iph->nexthdr == IPPROTO_UDP)
1392                         uh = (struct udphdr *)(iph + 1);
1393         }
1394         if (uh) {
1395                 if (uh->check)
1396                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1397                 else
1398                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1399         }
1400 }
1401 #endif
1402
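     /* Set up the network/transport header offsets and the TCP pseudo header
      * checksum that GRO needs for a packet aggregated by the 5731X chips,
      * using the inner/outer L2/L3 offsets reported in tpa_info->hdr_info.
      */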
1403 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1404                                            int payload_off, int tcp_ts,
1405                                            struct sk_buff *skb)
1406 {
1407 #ifdef CONFIG_INET
1408         struct tcphdr *th;
1409         int len, nw_off;
1410         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1411         u32 hdr_info = tpa_info->hdr_info;
1412         bool loopback = false;
1413
1414         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1415         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1416         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1417
1418         /* If the packet is an internal loopback packet, the offsets will
1419          * have an extra 4 bytes.
1420          */
1421         if (inner_mac_off == 4) {
1422                 loopback = true;
1423         } else if (inner_mac_off > 4) {
1424                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1425                                             ETH_HLEN - 2));
1426
1427                 /* We only support inner IPv4/IPv6.  If we don't see the
1428                  * correct protocol ID, it must be a loopback packet where
1429                  * the offsets are off by 4.
1430                  */
1431                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1432                         loopback = true;
1433         }
1434         if (loopback) {
1435                 /* internal loopback packet, subtract 4 from all offsets */
1436                 inner_ip_off -= 4;
1437                 inner_mac_off -= 4;
1438                 outer_ip_off -= 4;
1439         }
1440
1441         nw_off = inner_ip_off - ETH_HLEN;
1442         skb_set_network_header(skb, nw_off);
1443         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1444                 struct ipv6hdr *iph = ipv6_hdr(skb);
1445
1446                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1447                 len = skb->len - skb_transport_offset(skb);
1448                 th = tcp_hdr(skb);
1449                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1450         } else {
1451                 struct iphdr *iph = ip_hdr(skb);
1452
1453                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1454                 len = skb->len - skb_transport_offset(skb);
1455                 th = tcp_hdr(skb);
1456                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1457         }
1458
1459         if (inner_mac_off) { /* tunnel */
1460                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1461                                             ETH_HLEN - 2));
1462
1463                 bnxt_gro_tunnel(skb, proto);
1464         }
1465 #endif
1466         return skb;
1467 }
1468
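     /* 5750X (P5) variant of the GRO setup: only the network and transport
      * header offsets are derived from tpa_info->hdr_info here; the TCP
      * pseudo header checksum is not recomputed in this path.
      */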
1469 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1470                                            int payload_off, int tcp_ts,
1471                                            struct sk_buff *skb)
1472 {
1473 #ifdef CONFIG_INET
1474         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1475         u32 hdr_info = tpa_info->hdr_info;
1476         int iphdr_len, nw_off;
1477
1478         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1479         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1480         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1481
1482         nw_off = inner_ip_off - ETH_HLEN;
1483         skb_set_network_header(skb, nw_off);
1484         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1485                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1486         skb_set_transport_header(skb, nw_off + iphdr_len);
1487
1488         if (inner_mac_off) { /* tunnel */
1489                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1490                                             ETH_HLEN - 2));
1491
1492                 bnxt_gro_tunnel(skb, proto);
1493         }
1494 #endif
1495         return skb;
1496 }
1497
1498 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1499 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1500
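     /* GRO setup for the 5730X chips: the header offsets are derived from
      * the payload offset in the TPA_END completion rather than from
      * tpa_info->hdr_info.
      */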
1501 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1502                                            int payload_off, int tcp_ts,
1503                                            struct sk_buff *skb)
1504 {
1505 #ifdef CONFIG_INET
1506         struct tcphdr *th;
1507         int len, nw_off, tcp_opt_len = 0;
1508
1509         if (tcp_ts)
1510                 tcp_opt_len = 12;
1511
1512         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1513                 struct iphdr *iph;
1514
1515                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1516                          ETH_HLEN;
1517                 skb_set_network_header(skb, nw_off);
1518                 iph = ip_hdr(skb);
1519                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1520                 len = skb->len - skb_transport_offset(skb);
1521                 th = tcp_hdr(skb);
1522                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1523         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1524                 struct ipv6hdr *iph;
1525
1526                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1527                          ETH_HLEN;
1528                 skb_set_network_header(skb, nw_off);
1529                 iph = ipv6_hdr(skb);
1530                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1531                 len = skb->len - skb_transport_offset(skb);
1532                 th = tcp_hdr(skb);
1533                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1534         } else {
1535                 dev_kfree_skb_any(skb);
1536                 return NULL;
1537         }
1538
1539         if (nw_off) /* tunnel */
1540                 bnxt_gro_tunnel(skb, skb->protocol);
1541 #endif
1542         return skb;
1543 }
1544
1545 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1546                                            struct bnxt_tpa_info *tpa_info,
1547                                            struct rx_tpa_end_cmp *tpa_end,
1548                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1549                                            struct sk_buff *skb)
1550 {
1551 #ifdef CONFIG_INET
1552         int payload_off;
1553         u16 segs;
1554
1555         segs = TPA_END_TPA_SEGS(tpa_end);
1556         if (segs == 1)
1557                 return skb;
1558
1559         NAPI_GRO_CB(skb)->count = segs;
1560         skb_shinfo(skb)->gso_size =
1561                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1562         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1563         if (bp->flags & BNXT_FLAG_CHIP_P5)
1564                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1565         else
1566                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1567         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1568         if (likely(skb))
1569                 tcp_gro_complete(skb);
1570 #endif
1571         return skb;
1572 }
1573
1574 /* Given the cfa_code of a received packet determine which
1575  * netdev (vf-rep or PF) the packet is destined to.
1576  */
1577 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1578 {
1579         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1580
1581         /* if vf-rep dev is NULL, the packet must belong to the PF */
1582         return dev ? dev : bp->dev;
1583 }
1584
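     /* Handle a TPA_END completion: build an skb for the aggregated packet
      * from the buffer saved at TPA_START plus any aggregation buffers, then
      * set the RSS hash, VLAN tag and checksum status and hand the skb to
      * GRO when enabled.
      */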
1585 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1586                                            struct bnxt_cp_ring_info *cpr,
1587                                            u32 *raw_cons,
1588                                            struct rx_tpa_end_cmp *tpa_end,
1589                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1590                                            u8 *event)
1591 {
1592         struct bnxt_napi *bnapi = cpr->bnapi;
1593         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1594         u8 *data_ptr, agg_bufs;
1595         unsigned int len;
1596         struct bnxt_tpa_info *tpa_info;
1597         dma_addr_t mapping;
1598         struct sk_buff *skb;
1599         u16 idx = 0, agg_id;
1600         void *data;
1601         bool gro;
1602
1603         if (unlikely(bnapi->in_reset)) {
1604                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1605
1606                 if (rc < 0)
1607                         return ERR_PTR(-EBUSY);
1608                 return NULL;
1609         }
1610
1611         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1612                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1613                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1614                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1615                 tpa_info = &rxr->rx_tpa[agg_id];
1616                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1617                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1618                                     agg_bufs, tpa_info->agg_count);
1619                         agg_bufs = tpa_info->agg_count;
1620                 }
1621                 tpa_info->agg_count = 0;
1622                 *event |= BNXT_AGG_EVENT;
1623                 bnxt_free_agg_idx(rxr, agg_id);
1624                 idx = agg_id;
1625                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1626         } else {
1627                 agg_id = TPA_END_AGG_ID(tpa_end);
1628                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1629                 tpa_info = &rxr->rx_tpa[agg_id];
1630                 idx = RING_CMP(*raw_cons);
1631                 if (agg_bufs) {
1632                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1633                                 return ERR_PTR(-EBUSY);
1634
1635                         *event |= BNXT_AGG_EVENT;
1636                         idx = NEXT_CMP(idx);
1637                 }
1638                 gro = !!TPA_END_GRO(tpa_end);
1639         }
1640         data = tpa_info->data;
1641         data_ptr = tpa_info->data_ptr;
1642         prefetch(data_ptr);
1643         len = tpa_info->len;
1644         mapping = tpa_info->mapping;
1645
1646         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1647                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1648                 if (agg_bufs > MAX_SKB_FRAGS)
1649                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1650                                     agg_bufs, (int)MAX_SKB_FRAGS);
1651                 return NULL;
1652         }
1653
1654         if (len <= bp->rx_copy_thresh) {
1655                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1656                 if (!skb) {
1657                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1658                         cpr->sw_stats.rx.rx_oom_discards += 1;
1659                         return NULL;
1660                 }
1661         } else {
1662                 u8 *new_data;
1663                 dma_addr_t new_mapping;
1664
1665                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1666                 if (!new_data) {
1667                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1668                         cpr->sw_stats.rx.rx_oom_discards += 1;
1669                         return NULL;
1670                 }
1671
1672                 tpa_info->data = new_data;
1673                 tpa_info->data_ptr = new_data + bp->rx_offset;
1674                 tpa_info->mapping = new_mapping;
1675
1676                 skb = build_skb(data, 0);
1677                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1678                                        bp->rx_buf_use_size, bp->rx_dir,
1679                                        DMA_ATTR_WEAK_ORDERING);
1680
1681                 if (!skb) {
1682                         kfree(data);
1683                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1684                         cpr->sw_stats.rx.rx_oom_discards += 1;
1685                         return NULL;
1686                 }
1687                 skb_reserve(skb, bp->rx_offset);
1688                 skb_put(skb, len);
1689         }
1690
1691         if (agg_bufs) {
1692                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1693                 if (!skb) {
1694                         /* Page reuse already handled by bnxt_rx_pages(). */
1695                         cpr->sw_stats.rx.rx_oom_discards += 1;
1696                         return NULL;
1697                 }
1698         }
1699
1700         skb->protocol =
1701                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1702
1703         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1704                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1705
1706         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1707             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1708                 __be16 vlan_proto = htons(tpa_info->metadata >>
1709                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1710                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1711
1712                 if (eth_type_vlan(vlan_proto)) {
1713                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1714                 } else {
1715                         dev_kfree_skb(skb);
1716                         return NULL;
1717                 }
1718         }
1719
1720         skb_checksum_none_assert(skb);
1721         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1722                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1723                 skb->csum_level =
1724                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1725         }
1726
1727         if (gro)
1728                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1729
1730         return skb;
1731 }
1732
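     /* Stash a TPA aggregation buffer completion in the rx_tpa[] entry
      * until the matching TPA_END completion arrives.
      */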
1733 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1734                          struct rx_agg_cmp *rx_agg)
1735 {
1736         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1737         struct bnxt_tpa_info *tpa_info;
1738
1739         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1740         tpa_info = &rxr->rx_tpa[agg_id];
1741         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1742         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1743 }
1744
1745 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1746                              struct sk_buff *skb)
1747 {
1748         if (skb->dev != bp->dev) {
1749                 /* this packet belongs to a vf-rep */
1750                 bnxt_vf_rep_rx(bp, skb);
1751                 return;
1752         }
1753         skb_record_rx_queue(skb, bnapi->index);
1754         napi_gro_receive(&bnapi->napi, skb);
1755 }
1756
1757 /* returns the following:
1758  * 1       - 1 packet successfully received
1759  * 0       - successful TPA_START, packet not completed yet
1760  * -EBUSY  - completion ring does not have all the agg buffers yet
1761  * -ENOMEM - packet aborted due to out of memory
1762  * -EIO    - packet aborted due to hw error indicated in BD
1763  */
1764 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1765                        u32 *raw_cons, u8 *event)
1766 {
1767         struct bnxt_napi *bnapi = cpr->bnapi;
1768         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1769         struct net_device *dev = bp->dev;
1770         struct rx_cmp *rxcmp;
1771         struct rx_cmp_ext *rxcmp1;
1772         u32 tmp_raw_cons = *raw_cons;
1773         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1774         struct bnxt_sw_rx_bd *rx_buf;
1775         unsigned int len;
1776         u8 *data_ptr, agg_bufs, cmp_type;
1777         dma_addr_t dma_addr;
1778         struct sk_buff *skb;
1779         u32 flags, misc;
1780         void *data;
1781         int rc = 0;
1782
1783         rxcmp = (struct rx_cmp *)
1784                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1785
1786         cmp_type = RX_CMP_TYPE(rxcmp);
1787
1788         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1789                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1790                 goto next_rx_no_prod_no_len;
1791         }
1792
1793         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1794         cp_cons = RING_CMP(tmp_raw_cons);
1795         rxcmp1 = (struct rx_cmp_ext *)
1796                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1797
1798         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1799                 return -EBUSY;
1800
1801         /* The valid test of the entry must be done first before
1802          * reading any further.
1803          */
1804         dma_rmb();
1805         prod = rxr->rx_prod;
1806
1807         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1808                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1809                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1810
1811                 *event |= BNXT_RX_EVENT;
1812                 goto next_rx_no_prod_no_len;
1813
1814         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1815                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1816                                    (struct rx_tpa_end_cmp *)rxcmp,
1817                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1818
1819                 if (IS_ERR(skb))
1820                         return -EBUSY;
1821
1822                 rc = -ENOMEM;
1823                 if (likely(skb)) {
1824                         bnxt_deliver_skb(bp, bnapi, skb);
1825                         rc = 1;
1826                 }
1827                 *event |= BNXT_RX_EVENT;
1828                 goto next_rx_no_prod_no_len;
1829         }
1830
1831         cons = rxcmp->rx_cmp_opaque;
1832         if (unlikely(cons != rxr->rx_next_cons)) {
1833                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1834
1835                 /* 0xffff is a forced error, don't print it */
1836                 if (rxr->rx_next_cons != 0xffff)
1837                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1838                                     cons, rxr->rx_next_cons);
1839                 bnxt_sched_reset(bp, rxr);
1840                 if (rc1)
1841                         return rc1;
1842                 goto next_rx_no_prod_no_len;
1843         }
1844         rx_buf = &rxr->rx_buf_ring[cons];
1845         data = rx_buf->data;
1846         data_ptr = rx_buf->data_ptr;
1847         prefetch(data_ptr);
1848
1849         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1850         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1851
1852         if (agg_bufs) {
1853                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1854                         return -EBUSY;
1855
1856                 cp_cons = NEXT_CMP(cp_cons);
1857                 *event |= BNXT_AGG_EVENT;
1858         }
1859         *event |= BNXT_RX_EVENT;
1860
1861         rx_buf->data = NULL;
1862         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1863                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1864
1865                 bnxt_reuse_rx_data(rxr, cons, data);
1866                 if (agg_bufs)
1867                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1868                                                false);
1869
1870                 rc = -EIO;
1871                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1872                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1873                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1874                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1875                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1876                                                  rx_err);
1877                                 bnxt_sched_reset(bp, rxr);
1878                         }
1879                 }
1880                 goto next_rx_no_len;
1881         }
1882
1883         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1884         len = flags >> RX_CMP_LEN_SHIFT;
1885         dma_addr = rx_buf->mapping;
1886
1887         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1888                 rc = 1;
1889                 goto next_rx;
1890         }
1891
1892         if (len <= bp->rx_copy_thresh) {
1893                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1894                 bnxt_reuse_rx_data(rxr, cons, data);
1895                 if (!skb) {
1896                         if (agg_bufs)
1897                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1898                                                        agg_bufs, false);
1899                         cpr->sw_stats.rx.rx_oom_discards += 1;
1900                         rc = -ENOMEM;
1901                         goto next_rx;
1902                 }
1903         } else {
1904                 u32 payload;
1905
1906                 if (rx_buf->data_ptr == data_ptr)
1907                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1908                 else
1909                         payload = 0;
1910                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1911                                       payload | len);
1912                 if (!skb) {
1913                         cpr->sw_stats.rx.rx_oom_discards += 1;
1914                         rc = -ENOMEM;
1915                         goto next_rx;
1916                 }
1917         }
1918
1919         if (agg_bufs) {
1920                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1921                 if (!skb) {
1922                         cpr->sw_stats.rx.rx_oom_discards += 1;
1923                         rc = -ENOMEM;
1924                         goto next_rx;
1925                 }
1926         }
1927
1928         if (RX_CMP_HASH_VALID(rxcmp)) {
1929                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1930                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1931
1932                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1933                 if (hash_type != 1 && hash_type != 3)
1934                         type = PKT_HASH_TYPE_L3;
1935                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1936         }
1937
1938         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1939         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1940
1941         if ((rxcmp1->rx_cmp_flags2 &
1942              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1943             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1944                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1945                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1946                 __be16 vlan_proto = htons(meta_data >>
1947                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1948
1949                 if (eth_type_vlan(vlan_proto)) {
1950                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1951                 } else {
1952                         dev_kfree_skb(skb);
1953                         goto next_rx;
1954                 }
1955         }
1956
1957         skb_checksum_none_assert(skb);
1958         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1959                 if (dev->features & NETIF_F_RXCSUM) {
1960                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1961                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1962                 }
1963         } else {
1964                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1965                         if (dev->features & NETIF_F_RXCSUM)
1966                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1967                 }
1968         }
1969
1970         if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1971                      RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1972                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1973                         u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1974                         u64 ns, ts;
1975
1976                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1977                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1978
1979                                 spin_lock_bh(&ptp->ptp_lock);
1980                                 ns = timecounter_cyc2time(&ptp->tc, ts);
1981                                 spin_unlock_bh(&ptp->ptp_lock);
1982                                 memset(skb_hwtstamps(skb), 0,
1983                                        sizeof(*skb_hwtstamps(skb)));
1984                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1985                         }
1986                 }
1987         }
1988         bnxt_deliver_skb(bp, bnapi, skb);
1989         rc = 1;
1990
1991 next_rx:
1992         cpr->rx_packets += 1;
1993         cpr->rx_bytes += len;
1994
1995 next_rx_no_len:
1996         rxr->rx_prod = NEXT_RX(prod);
1997         rxr->rx_next_cons = NEXT_RX(cons);
1998
1999 next_rx_no_prod_no_len:
2000         *raw_cons = tmp_raw_cons;
2001
2002         return rc;
2003 }
2004
2005 /* In netpoll mode, if we are using a combined completion ring, we need to
2006  * discard the rx packets and recycle the buffers.
2007  */
2008 static int bnxt_force_rx_discard(struct bnxt *bp,
2009                                  struct bnxt_cp_ring_info *cpr,
2010                                  u32 *raw_cons, u8 *event)
2011 {
2012         u32 tmp_raw_cons = *raw_cons;
2013         struct rx_cmp_ext *rxcmp1;
2014         struct rx_cmp *rxcmp;
2015         u16 cp_cons;
2016         u8 cmp_type;
2017         int rc;
2018
2019         cp_cons = RING_CMP(tmp_raw_cons);
2020         rxcmp = (struct rx_cmp *)
2021                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2022
2023         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2024         cp_cons = RING_CMP(tmp_raw_cons);
2025         rxcmp1 = (struct rx_cmp_ext *)
2026                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2027
2028         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2029                 return -EBUSY;
2030
2031         /* The valid test of the entry must be done first before
2032          * reading any further.
2033          */
2034         dma_rmb();
2035         cmp_type = RX_CMP_TYPE(rxcmp);
2036         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2037                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2038                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2039         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2040                 struct rx_tpa_end_cmp_ext *tpa_end1;
2041
2042                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2043                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2044                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2045         }
2046         rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2047         if (rc && rc != -EBUSY)
2048                 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2049         return rc;
2050 }
2051
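     /* Read a firmware health register.  The register descriptor encodes
      * both the access method (PCI config space, GRC window, BAR0 or BAR1)
      * and the offset to read.
      */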
2052 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2053 {
2054         struct bnxt_fw_health *fw_health = bp->fw_health;
2055         u32 reg = fw_health->regs[reg_idx];
2056         u32 reg_type, reg_off, val = 0;
2057
2058         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2059         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2060         switch (reg_type) {
2061         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2062                 pci_read_config_dword(bp->pdev, reg_off, &val);
2063                 break;
2064         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2065                 reg_off = fw_health->mapped_regs[reg_idx];
2066                 fallthrough;
2067         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2068                 val = readl(bp->bar0 + reg_off);
2069                 break;
2070         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2071                 val = readl(bp->bar1 + reg_off);
2072                 break;
2073         }
2074         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2075                 val &= fw_health->fw_reset_inprog_reg_mask;
2076         return val;
2077 }
2078
2079 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2080 {
2081         int i;
2082
2083         for (i = 0; i < bp->rx_nr_rings; i++) {
2084                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2085                 struct bnxt_ring_grp_info *grp_info;
2086
2087                 grp_info = &bp->grp_info[grp_idx];
2088                 if (grp_info->agg_fw_ring_id == ring_id)
2089                         return grp_idx;
2090         }
2091         return INVALID_HW_RING_ID;
2092 }
2093
2094 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2095 {
2096         switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2097         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2098                 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2099                            BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2100                 break;
2101         default:
2102                 netdev_err(bp->dev, "FW reported unknown error type\n");
2103                 break;
2104         }
2105 }
2106
2107 #define BNXT_GET_EVENT_PORT(data)       \
2108         ((data) &                       \
2109          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2110
2111 #define BNXT_EVENT_RING_TYPE(data2)     \
2112         ((data2) &                      \
2113          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2114
2115 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2116         (BNXT_EVENT_RING_TYPE(data2) == \
2117          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2118
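     /* Decode a firmware async event completion, set the matching sp_event
      * bit(s) and schedule slow path work where needed.
      */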
2119 static int bnxt_async_event_process(struct bnxt *bp,
2120                                     struct hwrm_async_event_cmpl *cmpl)
2121 {
2122         u16 event_id = le16_to_cpu(cmpl->event_id);
2123         u32 data1 = le32_to_cpu(cmpl->event_data1);
2124         u32 data2 = le32_to_cpu(cmpl->event_data2);
2125
2126         /* TODO CHIMP_FW: Define event IDs for link change, error etc */
2127         switch (event_id) {
2128         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2129                 struct bnxt_link_info *link_info = &bp->link_info;
2130
2131                 if (BNXT_VF(bp))
2132                         goto async_event_process_exit;
2133
2134                 /* print unsupported speed warning in forced speed mode only */
2135                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2136                     (data1 & 0x20000)) {
2137                         u16 fw_speed = link_info->force_link_speed;
2138                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2139
2140                         if (speed != SPEED_UNKNOWN)
2141                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2142                                             speed);
2143                 }
2144                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2145         }
2146                 fallthrough;
2147         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2148         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2149                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2150                 fallthrough;
2151         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2152                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2153                 break;
2154         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2155                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2156                 break;
2157         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2158                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2159
2160                 if (BNXT_VF(bp))
2161                         break;
2162
2163                 if (bp->pf.port_id != port_id)
2164                         break;
2165
2166                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2167                 break;
2168         }
2169         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2170                 if (BNXT_PF(bp))
2171                         goto async_event_process_exit;
2172                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2173                 break;
2174         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2175                 char *fatal_str = "non-fatal";
2176
2177                 if (!bp->fw_health)
2178                         goto async_event_process_exit;
2179
2180                 bp->fw_reset_timestamp = jiffies;
2181                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2182                 if (!bp->fw_reset_min_dsecs)
2183                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2184                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2185                 if (!bp->fw_reset_max_dsecs)
2186                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2187                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2188                         fatal_str = "fatal";
2189                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2190                 }
2191                 netif_warn(bp, hw, bp->dev,
2192                            "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2193                            fatal_str, data1, data2,
2194                            bp->fw_reset_min_dsecs * 100,
2195                            bp->fw_reset_max_dsecs * 100);
2196                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2197                 break;
2198         }
2199         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2200                 struct bnxt_fw_health *fw_health = bp->fw_health;
2201
2202                 if (!fw_health)
2203                         goto async_event_process_exit;
2204
2205                 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2206                         fw_health->enabled = false;
2207                         netif_info(bp, drv, bp->dev,
2208                                    "Error recovery info: error recovery[0]\n");
2209                         break;
2210                 }
2211                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2212                 fw_health->tmr_multiplier =
2213                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2214                                      bp->current_interval * 10);
2215                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2216                 if (!fw_health->enabled) {
2217                         fw_health->last_fw_heartbeat =
2218                                 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2219                         fw_health->last_fw_reset_cnt =
2220                                 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2221                 }
2222                 netif_info(bp, drv, bp->dev,
2223                            "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2224                            fw_health->master, fw_health->last_fw_reset_cnt,
2225                            bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2226                 if (!fw_health->enabled) {
2227                         /* Make sure tmr_counter is set and visible to
2228                          * bnxt_health_check() before setting enabled to true.
2229                          */
2230                         smp_wmb();
2231                         fw_health->enabled = true;
2232                 }
2233                 goto async_event_process_exit;
2234         }
2235         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2236                 netif_notice(bp, hw, bp->dev,
2237                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2238                              data1, data2);
2239                 goto async_event_process_exit;
2240         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2241                 struct bnxt_rx_ring_info *rxr;
2242                 u16 grp_idx;
2243
2244                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2245                         goto async_event_process_exit;
2246
2247                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2248                             BNXT_EVENT_RING_TYPE(data2), data1);
2249                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2250                         goto async_event_process_exit;
2251
2252                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2253                 if (grp_idx == INVALID_HW_RING_ID) {
2254                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2255                                     data1);
2256                         goto async_event_process_exit;
2257                 }
2258                 rxr = bp->bnapi[grp_idx]->rx_ring;
2259                 bnxt_sched_reset(bp, rxr);
2260                 goto async_event_process_exit;
2261         }
2262         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2263                 struct bnxt_fw_health *fw_health = bp->fw_health;
2264
2265                 netif_notice(bp, hw, bp->dev,
2266                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2267                              data1, data2);
2268                 if (fw_health) {
2269                         fw_health->echo_req_data1 = data1;
2270                         fw_health->echo_req_data2 = data2;
2271                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2272                         break;
2273                 }
2274                 goto async_event_process_exit;
2275         }
2276         case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2277                 bnxt_ptp_pps_event(bp, data1, data2);
2278                 goto async_event_process_exit;
2279         }
2280         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2281                 bnxt_event_error_report(bp, data1, data2);
2282                 goto async_event_process_exit;
2283         }
2284         case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2285                 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2286
2287                 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2288                 goto async_event_process_exit;
2289         }
2290         default:
2291                 goto async_event_process_exit;
2292         }
2293         bnxt_queue_sp_work(bp);
2294 async_event_process_exit:
2295         bnxt_ulp_async_events(bp, cmpl);
2296         return 0;
2297 }
2298
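     /* Dispatch HWRM-related completions: DONE completions mark the pending
      * HWRM request token as complete, forwarded VF requests are queued for
      * PF processing, and async events are passed to
      * bnxt_async_event_process().
      */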
2299 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2300 {
2301         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2302         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2303         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2304                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2305
2306         switch (cmpl_type) {
2307         case CMPL_BASE_TYPE_HWRM_DONE:
2308                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2309                 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2310                 break;
2311
2312         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2313                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2314
2315                 if ((vf_id < bp->pf.first_vf_id) ||
2316                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2317                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2318                                    vf_id);
2319                         return -EINVAL;
2320                 }
2321
2322                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2323                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2324                 bnxt_queue_sp_work(bp);
2325                 break;
2326
2327         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2328                 bnxt_async_event_process(bp,
2329                                          (struct hwrm_async_event_cmpl *)txcmp);
2330                 break;
2331
2332         default:
2333                 break;
2334         }
2335
2336         return 0;
2337 }
2338
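     /* MSI-X interrupt handler: one vector per NAPI instance, simply
      * schedule NAPI on the associated completion ring.
      */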
2339 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2340 {
2341         struct bnxt_napi *bnapi = dev_instance;
2342         struct bnxt *bp = bnapi->bp;
2343         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2344         u32 cons = RING_CMP(cpr->cp_raw_cons);
2345
2346         cpr->event_ctr++;
2347         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2348         napi_schedule(&bnapi->napi);
2349         return IRQ_HANDLED;
2350 }
2351
2352 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2353 {
2354         u32 raw_cons = cpr->cp_raw_cons;
2355         u16 cons = RING_CMP(raw_cons);
2356         struct tx_cmp *txcmp;
2357
2358         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2359
2360         return TX_CMP_VALID(txcmp, raw_cons);
2361 }
2362
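     /* Legacy INTA interrupt handler: check for spurious or shared
      * interrupts, disable the ring IRQ via the doorbell and schedule NAPI.
      */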
2363 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2364 {
2365         struct bnxt_napi *bnapi = dev_instance;
2366         struct bnxt *bp = bnapi->bp;
2367         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2368         u32 cons = RING_CMP(cpr->cp_raw_cons);
2369         u32 int_status;
2370
2371         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2372
2373         if (!bnxt_has_work(bp, cpr)) {
2374                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2375                 /* return if erroneous interrupt */
2376                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2377                         return IRQ_NONE;
2378         }
2379
2380         /* disable ring IRQ */
2381         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2382
2383         /* Return here if interrupt is shared and is disabled. */
2384         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2385                 return IRQ_HANDLED;
2386
2387         napi_schedule(&bnapi->napi);
2388         return IRQ_HANDLED;
2389 }
2390
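     /* Core completion ring poll loop: walk the ring and handle TX, RX and
      * HWRM/async completions until the NAPI budget is exhausted or no more
      * valid entries remain.
      */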
2391 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2392                             int budget)
2393 {
2394         struct bnxt_napi *bnapi = cpr->bnapi;
2395         u32 raw_cons = cpr->cp_raw_cons;
2396         u32 cons;
2397         int tx_pkts = 0;
2398         int rx_pkts = 0;
2399         u8 event = 0;
2400         struct tx_cmp *txcmp;
2401
2402         cpr->has_more_work = 0;
2403         cpr->had_work_done = 1;
2404         while (1) {
2405                 int rc;
2406
2407                 cons = RING_CMP(raw_cons);
2408                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2409
2410                 if (!TX_CMP_VALID(txcmp, raw_cons))
2411                         break;
2412
2413                 /* The valid test of the entry must be done first before
2414                  * reading any further.
2415                  */
2416                 dma_rmb();
2417                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2418                         tx_pkts++;
2419                         /* return full budget so NAPI will complete. */
2420                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2421                                 rx_pkts = budget;
2422                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2423                                 if (budget)
2424                                         cpr->has_more_work = 1;
2425                                 break;
2426                         }
2427                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2428                         if (likely(budget))
2429                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2430                         else
2431                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2432                                                            &event);
2433                         if (likely(rc >= 0))
2434                                 rx_pkts += rc;
2435                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2436                          * the NAPI budget.  Otherwise, we may potentially loop
2437                          * here forever if we consistently cannot allocate
2438                          * buffers.
2439                          */
2440                         else if (rc == -ENOMEM && budget)
2441                                 rx_pkts++;
2442                         else if (rc == -EBUSY)  /* partial completion */
2443                                 break;
2444                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2445                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2446                                     (TX_CMP_TYPE(txcmp) ==
2447                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2448                                     (TX_CMP_TYPE(txcmp) ==
2449                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2450                         bnxt_hwrm_handler(bp, txcmp);
2451                 }
2452                 raw_cons = NEXT_RAW_CMP(raw_cons);
2453
2454                 if (rx_pkts && rx_pkts == budget) {
2455                         cpr->has_more_work = 1;
2456                         break;
2457                 }
2458         }
2459
2460         if (event & BNXT_REDIRECT_EVENT)
2461                 xdp_do_flush_map();
2462
2463         if (event & BNXT_TX_EVENT) {
2464                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2465                 u16 prod = txr->tx_prod;
2466
2467                 /* Sync BD data before updating doorbell */
2468                 wmb();
2469
2470                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2471         }
2472
2473         cpr->cp_raw_cons = raw_cons;
2474         bnapi->tx_pkts += tx_pkts;
2475         bnapi->events |= event;
2476         return rx_pkts;
2477 }
2478
2479 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2480 {
2481         if (bnapi->tx_pkts) {
2482                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2483                 bnapi->tx_pkts = 0;
2484         }
2485
2486         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2487                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2488
2489                 if (bnapi->events & BNXT_AGG_EVENT)
2490                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2491                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2492         }
2493         bnapi->events = 0;
2494 }
2495
2496 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2497                           int budget)
2498 {
2499         struct bnxt_napi *bnapi = cpr->bnapi;
2500         int rx_pkts;
2501
2502         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2503
2504         /* ACK completion ring before freeing tx ring and producing new
2505          * buffers in rx/agg rings to prevent overflowing the completion
2506          * ring.
2507          */
2508         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2509
2510         __bnxt_poll_work_done(bp, bnapi);
2511         return rx_pkts;
2512 }
2513
2514 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2515 {
2516         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2517         struct bnxt *bp = bnapi->bp;
2518         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2519         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2520         struct tx_cmp *txcmp;
2521         struct rx_cmp_ext *rxcmp1;
2522         u32 cp_cons, tmp_raw_cons;
2523         u32 raw_cons = cpr->cp_raw_cons;
2524         u32 rx_pkts = 0;
2525         u8 event = 0;
2526
2527         while (1) {
2528                 int rc;
2529
2530                 cp_cons = RING_CMP(raw_cons);
2531                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2532
2533                 if (!TX_CMP_VALID(txcmp, raw_cons))
2534                         break;
2535
2536                 /* The valid test of the entry must be done first before
2537                  * reading any further.
2538                  */
2539                 dma_rmb();
2540                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2541                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2542                         cp_cons = RING_CMP(tmp_raw_cons);
2543                         rxcmp1 = (struct rx_cmp_ext *)
2544                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2545
2546                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2547                                 break;
2548
2549                         /* force an error to recycle the buffer */
2550                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2551                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2552
2553                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2554                         if (likely(rc == -EIO) && budget)
2555                                 rx_pkts++;
2556                         else if (rc == -EBUSY)  /* partial completion */
2557                                 break;
2558                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2559                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2560                         bnxt_hwrm_handler(bp, txcmp);
2561                 } else {
2562                         netdev_err(bp->dev,
2563                                    "Invalid completion received on special ring\n");
2564                 }
2565                 raw_cons = NEXT_RAW_CMP(raw_cons);
2566
2567                 if (rx_pkts == budget)
2568                         break;
2569         }
2570
2571         cpr->cp_raw_cons = raw_cons;
2572         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2573         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2574
2575         if (event & BNXT_AGG_EVENT)
2576                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2577
2578         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2579                 napi_complete_done(napi, rx_pkts);
2580                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2581         }
2582         return rx_pkts;
2583 }
2584
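     /* NAPI poll handler used when TX/RX completions are placed directly on
      * the per-vector completion ring (non-P5 chips).
      */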
2585 static int bnxt_poll(struct napi_struct *napi, int budget)
2586 {
2587         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2588         struct bnxt *bp = bnapi->bp;
2589         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2590         int work_done = 0;
2591
2592         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2593                 napi_complete(napi);
2594                 return 0;
2595         }
2596         while (1) {
2597                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2598
2599                 if (work_done >= budget) {
2600                         if (!budget)
2601                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2602                         break;
2603                 }
2604
2605                 if (!bnxt_has_work(bp, cpr)) {
2606                         if (napi_complete_done(napi, work_done))
2607                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2608                         break;
2609                 }
2610         }
2611         if (bp->flags & BNXT_FLAG_DIM) {
2612                 struct dim_sample dim_sample = {};
2613
2614                 dim_update_sample(cpr->event_ctr,
2615                                   cpr->rx_packets,
2616                                   cpr->rx_bytes,
2617                                   &dim_sample);
2618                 net_dim(&cpr->dim, dim_sample);
2619         }
2620         return work_done;
2621 }
2622
2623 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2624 {
2625         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2626         int i, work_done = 0;
2627
2628         for (i = 0; i < 2; i++) {
2629                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2630
2631                 if (cpr2) {
2632                         work_done += __bnxt_poll_work(bp, cpr2,
2633                                                       budget - work_done);
2634                         cpr->has_more_work |= cpr2->has_more_work;
2635                 }
2636         }
2637         return work_done;
2638 }
2639
2640 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2641                                  u64 dbr_type)
2642 {
2643         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2644         int i;
2645
2646         for (i = 0; i < 2; i++) {
2647                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2648                 struct bnxt_db_info *db;
2649
2650                 if (cpr2 && cpr2->had_work_done) {
2651                         db = &cpr2->cp_db;
2652                         bnxt_writeq(bp, db->db_key64 | dbr_type |
2653                                     RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2654                         cpr2->had_work_done = 0;
2655                 }
2656         }
2657         __bnxt_poll_work_done(bp, bnapi);
2658 }
2659
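     /* NAPI poll handler for P5 chips: NQ entries identify which completion
      * rings have work, and each referenced ring is then polled with
      * __bnxt_poll_work().
      */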
2660 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2661 {
2662         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2663         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2664         u32 raw_cons = cpr->cp_raw_cons;
2665         struct bnxt *bp = bnapi->bp;
2666         struct nqe_cn *nqcmp;
2667         int work_done = 0;
2668         u32 cons;
2669
2670         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2671                 napi_complete(napi);
2672                 return 0;
2673         }
2674         if (cpr->has_more_work) {
2675                 cpr->has_more_work = 0;
2676                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2677         }
2678         while (1) {
2679                 cons = RING_CMP(raw_cons);
2680                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2681
2682                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2683                         if (cpr->has_more_work)
2684                                 break;
2685
2686                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2687                         cpr->cp_raw_cons = raw_cons;
2688                         if (napi_complete_done(napi, work_done))
2689                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2690                                                   cpr->cp_raw_cons);
2691                         return work_done;
2692                 }
2693
2694                 /* The valid bit of the entry must be checked before
2695                  * reading any further.
2696                  */
2697                 dma_rmb();
2698
2699                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2700                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2701                         struct bnxt_cp_ring_info *cpr2;
2702
2703                         cpr2 = cpr->cp_ring_arr[idx];
2704                         work_done += __bnxt_poll_work(bp, cpr2,
2705                                                       budget - work_done);
2706                         cpr->has_more_work |= cpr2->has_more_work;
2707                 } else {
2708                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2709                 }
2710                 raw_cons = NEXT_RAW_CMP(raw_cons);
2711         }
2712         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2713         if (raw_cons != cpr->cp_raw_cons) {
2714                 cpr->cp_raw_cons = raw_cons;
2715                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2716         }
2717         return work_done;
2718 }
2719
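/* Drop every pending TX buffer on all TX rings: XDP_REDIRECT frames are
 * unmapped and returned, push-mode skbs are freed directly, and normal
 * skbs have their head and fragment DMA mappings torn down before the
 * skb is freed.  The BQL state of each TX queue is reset afterwards.
 */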
2720 static void bnxt_free_tx_skbs(struct bnxt *bp)
2721 {
2722         int i, max_idx;
2723         struct pci_dev *pdev = bp->pdev;
2724
2725         if (!bp->tx_ring)
2726                 return;
2727
2728         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2729         for (i = 0; i < bp->tx_nr_rings; i++) {
2730                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2731                 int j;
2732
2733                 for (j = 0; j < max_idx;) {
2734                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2735                         struct sk_buff *skb;
2736                         int k, last;
2737
2738                         if (i < bp->tx_nr_rings_xdp &&
2739                             tx_buf->action == XDP_REDIRECT) {
2740                                 dma_unmap_single(&pdev->dev,
2741                                         dma_unmap_addr(tx_buf, mapping),
2742                                         dma_unmap_len(tx_buf, len),
2743                                         DMA_TO_DEVICE);
2744                                 xdp_return_frame(tx_buf->xdpf);
2745                                 tx_buf->action = 0;
2746                                 tx_buf->xdpf = NULL;
2747                                 j++;
2748                                 continue;
2749                         }
2750
2751                         skb = tx_buf->skb;
2752                         if (!skb) {
2753                                 j++;
2754                                 continue;
2755                         }
2756
2757                         tx_buf->skb = NULL;
2758
2759                         if (tx_buf->is_push) {
2760                                 dev_kfree_skb(skb);
2761                                 j += 2;
2762                                 continue;
2763                         }
2764
2765                         dma_unmap_single(&pdev->dev,
2766                                          dma_unmap_addr(tx_buf, mapping),
2767                                          skb_headlen(skb),
2768                                          DMA_TO_DEVICE);
2769
2770                         last = tx_buf->nr_frags;
2771                         j += 2;
2772                         for (k = 0; k < last; k++, j++) {
2773                                 int ring_idx = j & bp->tx_ring_mask;
2774                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2775
2776                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2777                                 dma_unmap_page(
2778                                         &pdev->dev,
2779                                         dma_unmap_addr(tx_buf, mapping),
2780                                         skb_frag_size(frag), DMA_TO_DEVICE);
2781                         }
2782                         dev_kfree_skb(skb);
2783                 }
2784                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2785         }
2786 }
2787
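/* Free all software buffers attached to one RX ring: outstanding TPA
 * buffers, normal RX buffers (page-pool pages in page mode, kmalloc'ed
 * data otherwise), aggregation ring pages and the cached rxr->rx_page,
 * then clear the TPA aggregation ID bitmap.
 */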
2788 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2789 {
2790         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2791         struct pci_dev *pdev = bp->pdev;
2792         struct bnxt_tpa_idx_map *map;
2793         int i, max_idx, max_agg_idx;
2794
2795         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2796         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2797         if (!rxr->rx_tpa)
2798                 goto skip_rx_tpa_free;
2799
2800         for (i = 0; i < bp->max_tpa; i++) {
2801                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2802                 u8 *data = tpa_info->data;
2803
2804                 if (!data)
2805                         continue;
2806
2807                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2808                                        bp->rx_buf_use_size, bp->rx_dir,
2809                                        DMA_ATTR_WEAK_ORDERING);
2810
2811                 tpa_info->data = NULL;
2812
2813                 kfree(data);
2814         }
2815
2816 skip_rx_tpa_free:
2817         for (i = 0; i < max_idx; i++) {
2818                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2819                 dma_addr_t mapping = rx_buf->mapping;
2820                 void *data = rx_buf->data;
2821
2822                 if (!data)
2823                         continue;
2824
2825                 rx_buf->data = NULL;
2826                 if (BNXT_RX_PAGE_MODE(bp)) {
2827                         mapping -= bp->rx_dma_offset;
2828                         dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2829                                              bp->rx_dir,
2830                                              DMA_ATTR_WEAK_ORDERING);
2831                         page_pool_recycle_direct(rxr->page_pool, data);
2832                 } else {
2833                         dma_unmap_single_attrs(&pdev->dev, mapping,
2834                                                bp->rx_buf_use_size, bp->rx_dir,
2835                                                DMA_ATTR_WEAK_ORDERING);
2836                         kfree(data);
2837                 }
2838         }
2839         for (i = 0; i < max_agg_idx; i++) {
2840                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2841                 struct page *page = rx_agg_buf->page;
2842
2843                 if (!page)
2844                         continue;
2845
2846                 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2847                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2848                                      DMA_ATTR_WEAK_ORDERING);
2849
2850                 rx_agg_buf->page = NULL;
2851                 __clear_bit(i, rxr->rx_agg_bmap);
2852
2853                 __free_page(page);
2854         }
2855         if (rxr->rx_page) {
2856                 __free_page(rxr->rx_page);
2857                 rxr->rx_page = NULL;
2858         }
2859         map = rxr->rx_tpa_idx_map;
2860         if (map)
2861                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2862 }
2863
2864 static void bnxt_free_rx_skbs(struct bnxt *bp)
2865 {
2866         int i;
2867
2868         if (!bp->rx_ring)
2869                 return;
2870
2871         for (i = 0; i < bp->rx_nr_rings; i++)
2872                 bnxt_free_one_rx_ring_skbs(bp, i);
2873 }
2874
2875 static void bnxt_free_skbs(struct bnxt *bp)
2876 {
2877         bnxt_free_tx_skbs(bp);
2878         bnxt_free_rx_skbs(bp);
2879 }
2880
2881 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2882 {
2883         u8 init_val = mem_init->init_val;
2884         u16 offset = mem_init->offset;
2885         u8 *p2 = p;
2886         int i;
2887
2888         if (!init_val)
2889                 return;
2890         if (offset == BNXT_MEM_INVALID_OFFSET) {
2891                 memset(p, init_val, len);
2892                 return;
2893         }
2894         for (i = 0; i < len; i += mem_init->size)
2895                 *(p2 + i + offset) = init_val;
2896 }
2897
2898 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2899 {
2900         struct pci_dev *pdev = bp->pdev;
2901         int i;
2902
2903         for (i = 0; i < rmem->nr_pages; i++) {
2904                 if (!rmem->pg_arr[i])
2905                         continue;
2906
2907                 dma_free_coherent(&pdev->dev, rmem->page_size,
2908                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2909
2910                 rmem->pg_arr[i] = NULL;
2911         }
2912         if (rmem->pg_tbl) {
2913                 size_t pg_tbl_size = rmem->nr_pages * 8;
2914
2915                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2916                         pg_tbl_size = rmem->page_size;
2917                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2918                                   rmem->pg_tbl, rmem->pg_tbl_map);
2919                 rmem->pg_tbl = NULL;
2920         }
2921         if (rmem->vmem_size && *rmem->vmem) {
2922                 vfree(*rmem->vmem);
2923                 *rmem->vmem = NULL;
2924         }
2925 }
2926
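/* Allocate the DMA memory described by @rmem: one coherent buffer per
 * page, an optional page table holding the page addresses (with PTE
 * valid/next-to-last/last bits for rings that need them), and an
 * optional vzalloc'ed software area of rmem->vmem_size bytes.
 */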
2927 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2928 {
2929         struct pci_dev *pdev = bp->pdev;
2930         u64 valid_bit = 0;
2931         int i;
2932
2933         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2934                 valid_bit = PTU_PTE_VALID;
2935         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2936                 size_t pg_tbl_size = rmem->nr_pages * 8;
2937
2938                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2939                         pg_tbl_size = rmem->page_size;
2940                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2941                                                   &rmem->pg_tbl_map,
2942                                                   GFP_KERNEL);
2943                 if (!rmem->pg_tbl)
2944                         return -ENOMEM;
2945         }
2946
2947         for (i = 0; i < rmem->nr_pages; i++) {
2948                 u64 extra_bits = valid_bit;
2949
2950                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2951                                                      rmem->page_size,
2952                                                      &rmem->dma_arr[i],
2953                                                      GFP_KERNEL);
2954                 if (!rmem->pg_arr[i])
2955                         return -ENOMEM;
2956
2957                 if (rmem->mem_init)
2958                         bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2959                                           rmem->page_size);
2960                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2961                         if (i == rmem->nr_pages - 2 &&
2962                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2963                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2964                         else if (i == rmem->nr_pages - 1 &&
2965                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2966                                 extra_bits |= PTU_PTE_LAST;
2967                         rmem->pg_tbl[i] =
2968                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2969                 }
2970         }
2971
2972         if (rmem->vmem_size) {
2973                 *rmem->vmem = vzalloc(rmem->vmem_size);
2974                 if (!(*rmem->vmem))
2975                         return -ENOMEM;
2976         }
2977         return 0;
2978 }
2979
2980 static void bnxt_free_tpa_info(struct bnxt *bp)
2981 {
2982         int i;
2983
2984         for (i = 0; i < bp->rx_nr_rings; i++) {
2985                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2986
2987                 kfree(rxr->rx_tpa_idx_map);
2988                 rxr->rx_tpa_idx_map = NULL;
2989                 if (rxr->rx_tpa) {
2990                         kfree(rxr->rx_tpa[0].agg_arr);
2991                         rxr->rx_tpa[0].agg_arr = NULL;
2992                 }
2993                 kfree(rxr->rx_tpa);
2994                 rxr->rx_tpa = NULL;
2995         }
2996 }
2997
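/* Allocate per-ring TPA state.  On P5 chips the number of TPA slots is
 * derived from the firmware-reported max_tpa_v2; a single aggregation
 * completion array is shared by all slots of a ring in MAX_SKB_FRAGS
 * sized chunks, and an index map tracks hardware aggregation IDs.
 */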
2998 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2999 {
3000         int i, j, total_aggs = 0;
3001
3002         bp->max_tpa = MAX_TPA;
3003         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3004                 if (!bp->max_tpa_v2)
3005                         return 0;
3006                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3007                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3008         }
3009
3010         for (i = 0; i < bp->rx_nr_rings; i++) {
3011                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3012                 struct rx_agg_cmp *agg;
3013
3014                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3015                                       GFP_KERNEL);
3016                 if (!rxr->rx_tpa)
3017                         return -ENOMEM;
3018
3019                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3020                         continue;
3021                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3022                 rxr->rx_tpa[0].agg_arr = agg;
3023                 if (!agg)
3024                         return -ENOMEM;
3025                 for (j = 1; j < bp->max_tpa; j++)
3026                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3027                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3028                                               GFP_KERNEL);
3029                 if (!rxr->rx_tpa_idx_map)
3030                         return -ENOMEM;
3031         }
3032         return 0;
3033 }
3034
3035 static void bnxt_free_rx_rings(struct bnxt *bp)
3036 {
3037         int i;
3038
3039         if (!bp->rx_ring)
3040                 return;
3041
3042         bnxt_free_tpa_info(bp);
3043         for (i = 0; i < bp->rx_nr_rings; i++) {
3044                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3045                 struct bnxt_ring_struct *ring;
3046
3047                 if (rxr->xdp_prog)
3048                         bpf_prog_put(rxr->xdp_prog);
3049
3050                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3051                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3052
3053                 page_pool_destroy(rxr->page_pool);
3054                 rxr->page_pool = NULL;
3055
3056                 kfree(rxr->rx_agg_bmap);
3057                 rxr->rx_agg_bmap = NULL;
3058
3059                 ring = &rxr->rx_ring_struct;
3060                 bnxt_free_ring(bp, &ring->ring_mem);
3061
3062                 ring = &rxr->rx_agg_ring_struct;
3063                 bnxt_free_ring(bp, &ring->ring_mem);
3064         }
3065 }
3066
3067 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3068                                    struct bnxt_rx_ring_info *rxr)
3069 {
3070         struct page_pool_params pp = { 0 };
3071
3072         pp.pool_size = bp->rx_ring_size;
3073         pp.nid = dev_to_node(&bp->pdev->dev);
3074         pp.dev = &bp->pdev->dev;
3075         pp.dma_dir = DMA_BIDIRECTIONAL;
3076
3077         rxr->page_pool = page_pool_create(&pp);
3078         if (IS_ERR(rxr->page_pool)) {
3079                 int err = PTR_ERR(rxr->page_pool);
3080
3081                 rxr->page_pool = NULL;
3082                 return err;
3083         }
3084         return 0;
3085 }
3086
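/* For each RX ring: create its page pool, register the ring with the
 * XDP rxq info framework using the page-pool memory model, allocate the
 * RX descriptor ring and, when aggregation rings are enabled, the
 * aggregation ring plus its buffer bitmap.  TPA state is allocated last
 * if TPA is enabled.
 */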
3087 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3088 {
3089         int i, rc = 0, agg_rings = 0;
3090
3091         if (!bp->rx_ring)
3092                 return -ENOMEM;
3093
3094         if (bp->flags & BNXT_FLAG_AGG_RINGS)
3095                 agg_rings = 1;
3096
3097         for (i = 0; i < bp->rx_nr_rings; i++) {
3098                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3099                 struct bnxt_ring_struct *ring;
3100
3101                 ring = &rxr->rx_ring_struct;
3102
3103                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3104                 if (rc)
3105                         return rc;
3106
3107                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3108                 if (rc < 0)
3109                         return rc;
3110
3111                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3112                                                 MEM_TYPE_PAGE_POOL,
3113                                                 rxr->page_pool);
3114                 if (rc) {
3115                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3116                         return rc;
3117                 }
3118
3119                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3120                 if (rc)
3121                         return rc;
3122
3123                 ring->grp_idx = i;
3124                 if (agg_rings) {
3125                         u16 mem_size;
3126
3127                         ring = &rxr->rx_agg_ring_struct;
3128                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3129                         if (rc)
3130                                 return rc;
3131
3132                         ring->grp_idx = i;
3133                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3134                         mem_size = rxr->rx_agg_bmap_size / 8;
3135                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3136                         if (!rxr->rx_agg_bmap)
3137                                 return -ENOMEM;
3138                 }
3139         }
3140         if (bp->flags & BNXT_FLAG_TPA)
3141                 rc = bnxt_alloc_tpa_info(bp);
3142         return rc;
3143 }
3144
3145 static void bnxt_free_tx_rings(struct bnxt *bp)
3146 {
3147         int i;
3148         struct pci_dev *pdev = bp->pdev;
3149
3150         if (!bp->tx_ring)
3151                 return;
3152
3153         for (i = 0; i < bp->tx_nr_rings; i++) {
3154                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3155                 struct bnxt_ring_struct *ring;
3156
3157                 if (txr->tx_push) {
3158                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3159                                           txr->tx_push, txr->tx_push_mapping);
3160                         txr->tx_push = NULL;
3161                 }
3162
3163                 ring = &txr->tx_ring_struct;
3164
3165                 bnxt_free_ring(bp, &ring->ring_mem);
3166         }
3167 }
3168
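/* Allocate TX descriptor rings.  When TX push is enabled, a small
 * coherent buffer is pre-allocated per ring to back up push operations;
 * push is disabled entirely if that buffer would exceed 256 bytes.
 * Each ring is also mapped to its hardware queue ID via tc_to_qidx.
 */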
3169 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3170 {
3171         int i, j, rc;
3172         struct pci_dev *pdev = bp->pdev;
3173
3174         bp->tx_push_size = 0;
3175         if (bp->tx_push_thresh) {
3176                 int push_size;
3177
3178                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3179                                         bp->tx_push_thresh);
3180
3181                 if (push_size > 256) {
3182                         push_size = 0;
3183                         bp->tx_push_thresh = 0;
3184                 }
3185
3186                 bp->tx_push_size = push_size;
3187         }
3188
3189         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3190                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3191                 struct bnxt_ring_struct *ring;
3192                 u8 qidx;
3193
3194                 ring = &txr->tx_ring_struct;
3195
3196                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3197                 if (rc)
3198                         return rc;
3199
3200                 ring->grp_idx = txr->bnapi->index;
3201                 if (bp->tx_push_size) {
3202                         dma_addr_t mapping;
3203
3204                         /* One pre-allocated DMA buffer to back up the
3205                          * TX push operation
3206                          */
3207                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3208                                                 bp->tx_push_size,
3209                                                 &txr->tx_push_mapping,
3210                                                 GFP_KERNEL);
3211
3212                         if (!txr->tx_push)
3213                                 return -ENOMEM;
3214
3215                         mapping = txr->tx_push_mapping +
3216                                 sizeof(struct tx_push_bd);
3217                         txr->data_mapping = cpu_to_le64(mapping);
3218                 }
3219                 qidx = bp->tc_to_qidx[j];
3220                 ring->queue_id = bp->q_info[qidx].queue_id;
3221                 if (i < bp->tx_nr_rings_xdp)
3222                         continue;
3223                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3224                         j++;
3225         }
3226         return 0;
3227 }
3228
3229 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3230 {
3231         kfree(cpr->cp_desc_ring);
3232         cpr->cp_desc_ring = NULL;
3233         kfree(cpr->cp_desc_mapping);
3234         cpr->cp_desc_mapping = NULL;
3235 }
3236
3237 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3238 {
3239         cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3240         if (!cpr->cp_desc_ring)
3241                 return -ENOMEM;
3242         cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3243                                        GFP_KERNEL);
3244         if (!cpr->cp_desc_mapping)
3245                 return -ENOMEM;
3246         return 0;
3247 }
3248
3249 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3250 {
3251         int i;
3252
3253         if (!bp->bnapi)
3254                 return;
3255         for (i = 0; i < bp->cp_nr_rings; i++) {
3256                 struct bnxt_napi *bnapi = bp->bnapi[i];
3257
3258                 if (!bnapi)
3259                         continue;
3260                 bnxt_free_cp_arrays(&bnapi->cp_ring);
3261         }
3262 }
3263
3264 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3265 {
3266         int i, n = bp->cp_nr_pages;
3267
3268         for (i = 0; i < bp->cp_nr_rings; i++) {
3269                 struct bnxt_napi *bnapi = bp->bnapi[i];
3270                 int rc;
3271
3272                 if (!bnapi)
3273                         continue;
3274                 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3275                 if (rc)
3276                         return rc;
3277         }
3278         return 0;
3279 }
3280
3281 static void bnxt_free_cp_rings(struct bnxt *bp)
3282 {
3283         int i;
3284
3285         if (!bp->bnapi)
3286                 return;
3287
3288         for (i = 0; i < bp->cp_nr_rings; i++) {
3289                 struct bnxt_napi *bnapi = bp->bnapi[i];
3290                 struct bnxt_cp_ring_info *cpr;
3291                 struct bnxt_ring_struct *ring;
3292                 int j;
3293
3294                 if (!bnapi)
3295                         continue;
3296
3297                 cpr = &bnapi->cp_ring;
3298                 ring = &cpr->cp_ring_struct;
3299
3300                 bnxt_free_ring(bp, &ring->ring_mem);
3301
3302                 for (j = 0; j < 2; j++) {
3303                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3304
3305                         if (cpr2) {
3306                                 ring = &cpr2->cp_ring_struct;
3307                                 bnxt_free_ring(bp, &ring->ring_mem);
3308                                 bnxt_free_cp_arrays(cpr2);
3309                                 kfree(cpr2);
3310                                 cpr->cp_ring_arr[j] = NULL;
3311                         }
3312                 }
3313         }
3314 }
3315
3316 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3317 {
3318         struct bnxt_ring_mem_info *rmem;
3319         struct bnxt_ring_struct *ring;
3320         struct bnxt_cp_ring_info *cpr;
3321         int rc;
3322
3323         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3324         if (!cpr)
3325                 return NULL;
3326
3327         rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3328         if (rc) {
3329                 bnxt_free_cp_arrays(cpr);
3330                 kfree(cpr);
3331                 return NULL;
3332         }
3333         ring = &cpr->cp_ring_struct;
3334         rmem = &ring->ring_mem;
3335         rmem->nr_pages = bp->cp_nr_pages;
3336         rmem->page_size = HW_CMPD_RING_SIZE;
3337         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3338         rmem->dma_arr = cpr->cp_desc_mapping;
3339         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3340         rc = bnxt_alloc_ring(bp, rmem);
3341         if (rc) {
3342                 bnxt_free_ring(bp, rmem);
3343                 bnxt_free_cp_arrays(cpr);
3344                 kfree(cpr);
3345                 cpr = NULL;
3346         }
3347         return cpr;
3348 }
3349
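/* Allocate the per-vector completion (on P5: notification) rings and
 * assign their doorbell map indices, accounting for MSI-X vectors
 * reserved for ULPs.  On P5 chips each vector additionally gets
 * dedicated completion sub-rings for its RX and/or TX ring, stored in
 * cpr->cp_ring_arr[].
 */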
3350 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3351 {
3352         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3353         int i, rc, ulp_base_vec, ulp_msix;
3354
3355         ulp_msix = bnxt_get_ulp_msix_num(bp);
3356         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3357         for (i = 0; i < bp->cp_nr_rings; i++) {
3358                 struct bnxt_napi *bnapi = bp->bnapi[i];
3359                 struct bnxt_cp_ring_info *cpr;
3360                 struct bnxt_ring_struct *ring;
3361
3362                 if (!bnapi)
3363                         continue;
3364
3365                 cpr = &bnapi->cp_ring;
3366                 cpr->bnapi = bnapi;
3367                 ring = &cpr->cp_ring_struct;
3368
3369                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3370                 if (rc)
3371                         return rc;
3372
3373                 if (ulp_msix && i >= ulp_base_vec)
3374                         ring->map_idx = i + ulp_msix;
3375                 else
3376                         ring->map_idx = i;
3377
3378                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3379                         continue;
3380
3381                 if (i < bp->rx_nr_rings) {
3382                         struct bnxt_cp_ring_info *cpr2 =
3383                                 bnxt_alloc_cp_sub_ring(bp);
3384
3385                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3386                         if (!cpr2)
3387                                 return -ENOMEM;
3388                         cpr2->bnapi = bnapi;
3389                 }
3390                 if ((sh && i < bp->tx_nr_rings) ||
3391                     (!sh && i >= bp->rx_nr_rings)) {
3392                         struct bnxt_cp_ring_info *cpr2 =
3393                                 bnxt_alloc_cp_sub_ring(bp);
3394
3395                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3396                         if (!cpr2)
3397                                 return -ENOMEM;
3398                         cpr2->bnapi = bnapi;
3399                 }
3400         }
3401         return 0;
3402 }
3403
3404 static void bnxt_init_ring_struct(struct bnxt *bp)
3405 {
3406         int i;
3407
3408         for (i = 0; i < bp->cp_nr_rings; i++) {
3409                 struct bnxt_napi *bnapi = bp->bnapi[i];
3410                 struct bnxt_ring_mem_info *rmem;
3411                 struct bnxt_cp_ring_info *cpr;
3412                 struct bnxt_rx_ring_info *rxr;
3413                 struct bnxt_tx_ring_info *txr;
3414                 struct bnxt_ring_struct *ring;
3415
3416                 if (!bnapi)
3417                         continue;
3418
3419                 cpr = &bnapi->cp_ring;
3420                 ring = &cpr->cp_ring_struct;
3421                 rmem = &ring->ring_mem;
3422                 rmem->nr_pages = bp->cp_nr_pages;
3423                 rmem->page_size = HW_CMPD_RING_SIZE;
3424                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3425                 rmem->dma_arr = cpr->cp_desc_mapping;
3426                 rmem->vmem_size = 0;
3427
3428                 rxr = bnapi->rx_ring;
3429                 if (!rxr)
3430                         goto skip_rx;
3431
3432                 ring = &rxr->rx_ring_struct;
3433                 rmem = &ring->ring_mem;
3434                 rmem->nr_pages = bp->rx_nr_pages;
3435                 rmem->page_size = HW_RXBD_RING_SIZE;
3436                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3437                 rmem->dma_arr = rxr->rx_desc_mapping;
3438                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3439                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3440
3441                 ring = &rxr->rx_agg_ring_struct;
3442                 rmem = &ring->ring_mem;
3443                 rmem->nr_pages = bp->rx_agg_nr_pages;
3444                 rmem->page_size = HW_RXBD_RING_SIZE;
3445                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3446                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3447                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3448                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3449
3450 skip_rx:
3451                 txr = bnapi->tx_ring;
3452                 if (!txr)
3453                         continue;
3454
3455                 ring = &txr->tx_ring_struct;
3456                 rmem = &ring->ring_mem;
3457                 rmem->nr_pages = bp->tx_nr_pages;
3458                 rmem->page_size = HW_RXBD_RING_SIZE;
3459                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3460                 rmem->dma_arr = txr->tx_desc_mapping;
3461                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3462                 rmem->vmem = (void **)&txr->tx_buf_ring;
3463         }
3464 }
3465
3466 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3467 {
3468         int i;
3469         u32 prod;
3470         struct rx_bd **rx_buf_ring;
3471
3472         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3473         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3474                 int j;
3475                 struct rx_bd *rxbd;
3476
3477                 rxbd = rx_buf_ring[i];
3478                 if (!rxbd)
3479                         continue;
3480
3481                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3482                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3483                         rxbd->rx_bd_opaque = prod;
3484                 }
3485         }
3486 }
3487
3488 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3489 {
3490         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3491         struct net_device *dev = bp->dev;
3492         u32 prod;
3493         int i;
3494
3495         prod = rxr->rx_prod;
3496         for (i = 0; i < bp->rx_ring_size; i++) {
3497                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3498                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3499                                     ring_nr, i, bp->rx_ring_size);
3500                         break;
3501                 }
3502                 prod = NEXT_RX(prod);
3503         }
3504         rxr->rx_prod = prod;
3505
3506         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3507                 return 0;
3508
3509         prod = rxr->rx_agg_prod;
3510         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3511                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3512                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3513                                     ring_nr, i, bp->rx_agg_ring_size);
3514                         break;
3515                 }
3516                 prod = NEXT_RX_AGG(prod);
3517         }
3518         rxr->rx_agg_prod = prod;
3519
3520         if (rxr->rx_tpa) {
3521                 dma_addr_t mapping;
3522                 u8 *data;
3523
3524                 for (i = 0; i < bp->max_tpa; i++) {
3525                         data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3526                         if (!data)
3527                                 return -ENOMEM;
3528
3529                         rxr->rx_tpa[i].data = data;
3530                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3531                         rxr->rx_tpa[i].mapping = mapping;
3532                 }
3533         }
3534         return 0;
3535 }
3536
3537 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3538 {
3539         struct bnxt_rx_ring_info *rxr;
3540         struct bnxt_ring_struct *ring;
3541         u32 type;
3542
3543         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3544                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3545
3546         if (NET_IP_ALIGN == 2)
3547                 type |= RX_BD_FLAGS_SOP;
3548
3549         rxr = &bp->rx_ring[ring_nr];
3550         ring = &rxr->rx_ring_struct;
3551         bnxt_init_rxbd_pages(ring, type);
3552
3553         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3554                 bpf_prog_add(bp->xdp_prog, 1);
3555                 rxr->xdp_prog = bp->xdp_prog;
3556         }
3557         ring->fw_ring_id = INVALID_HW_RING_ID;
3558
3559         ring = &rxr->rx_agg_ring_struct;
3560         ring->fw_ring_id = INVALID_HW_RING_ID;
3561
3562         if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3563                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3564                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3565
3566                 bnxt_init_rxbd_pages(ring, type);
3567         }
3568
3569         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3570 }
3571
3572 static void bnxt_init_cp_rings(struct bnxt *bp)
3573 {
3574         int i, j;
3575
3576         for (i = 0; i < bp->cp_nr_rings; i++) {
3577                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3578                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3579
3580                 ring->fw_ring_id = INVALID_HW_RING_ID;
3581                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3582                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3583                 for (j = 0; j < 2; j++) {
3584                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3585
3586                         if (!cpr2)
3587                                 continue;
3588
3589                         ring = &cpr2->cp_ring_struct;
3590                         ring->fw_ring_id = INVALID_HW_RING_ID;
3591                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3592                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3593                 }
3594         }
3595 }
3596
3597 static int bnxt_init_rx_rings(struct bnxt *bp)
3598 {
3599         int i, rc = 0;
3600
3601         if (BNXT_RX_PAGE_MODE(bp)) {
3602                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3603                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3604         } else {
3605                 bp->rx_offset = BNXT_RX_OFFSET;
3606                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3607         }
3608
3609         for (i = 0; i < bp->rx_nr_rings; i++) {
3610                 rc = bnxt_init_one_rx_ring(bp, i);
3611                 if (rc)
3612                         break;
3613         }
3614
3615         return rc;
3616 }
3617
3618 static int bnxt_init_tx_rings(struct bnxt *bp)
3619 {
3620         u16 i;
3621
3622         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3623                                    MAX_SKB_FRAGS + 1);
3624
3625         for (i = 0; i < bp->tx_nr_rings; i++) {
3626                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3627                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3628
3629                 ring->fw_ring_id = INVALID_HW_RING_ID;
3630         }
3631
3632         return 0;
3633 }
3634
3635 static void bnxt_free_ring_grps(struct bnxt *bp)
3636 {
3637         kfree(bp->grp_info);
3638         bp->grp_info = NULL;
3639 }
3640
3641 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3642 {
3643         int i;
3644
3645         if (irq_re_init) {
3646                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3647                                        sizeof(struct bnxt_ring_grp_info),
3648                                        GFP_KERNEL);
3649                 if (!bp->grp_info)
3650                         return -ENOMEM;
3651         }
3652         for (i = 0; i < bp->cp_nr_rings; i++) {
3653                 if (irq_re_init)
3654                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3655                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3656                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3657                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3658                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3659         }
3660         return 0;
3661 }
3662
3663 static void bnxt_free_vnics(struct bnxt *bp)
3664 {
3665         kfree(bp->vnic_info);
3666         bp->vnic_info = NULL;
3667         bp->nr_vnics = 0;
3668 }
3669
3670 static int bnxt_alloc_vnics(struct bnxt *bp)
3671 {
3672         int num_vnics = 1;
3673
3674 #ifdef CONFIG_RFS_ACCEL
3675         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3676                 num_vnics += bp->rx_nr_rings;
3677 #endif
3678
3679         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3680                 num_vnics++;
3681
3682         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3683                                 GFP_KERNEL);
3684         if (!bp->vnic_info)
3685                 return -ENOMEM;
3686
3687         bp->nr_vnics = num_vnics;
3688         return 0;
3689 }
3690
3691 static void bnxt_init_vnics(struct bnxt *bp)
3692 {
3693         int i;
3694
3695         for (i = 0; i < bp->nr_vnics; i++) {
3696                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3697                 int j;
3698
3699                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3700                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3701                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3702
3703                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3704
3705                 if (bp->vnic_info[i].rss_hash_key) {
3706                         if (i == 0)
3707                                 prandom_bytes(vnic->rss_hash_key,
3708                                               HW_HASH_KEY_SIZE);
3709                         else
3710                                 memcpy(vnic->rss_hash_key,
3711                                        bp->vnic_info[0].rss_hash_key,
3712                                        HW_HASH_KEY_SIZE);
3713                 }
3714         }
3715 }
3716
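/* Compute how many descriptor pages are needed for @ring_size entries,
 * rounded up to a power of two so the resulting ring can be indexed
 * with a simple mask (ring sizes are typically 2^n - 1).
 */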
3717 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3718 {
3719         int pages;
3720
3721         pages = ring_size / desc_per_pg;
3722
3723         if (!pages)
3724                 return 1;
3725
3726         pages++;
3727
3728         while (pages & (pages - 1))
3729                 pages++;
3730
3731         return pages;
3732 }
3733
3734 void bnxt_set_tpa_flags(struct bnxt *bp)
3735 {
3736         bp->flags &= ~BNXT_FLAG_TPA;
3737         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3738                 return;
3739         if (bp->dev->features & NETIF_F_LRO)
3740                 bp->flags |= BNXT_FLAG_LRO;
3741         else if (bp->dev->features & NETIF_F_GRO_HW)
3742                 bp->flags |= BNXT_FLAG_GRO;
3743 }
3744
3745 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3746  * be set on entry.
3747  */
3748 void bnxt_set_ring_params(struct bnxt *bp)
3749 {
3750         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3751         u32 agg_factor = 0, agg_ring_size = 0;
3752
3753         /* 8 for CRC and VLAN */
3754         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3755
3756         rx_space = rx_size + NET_SKB_PAD +
3757                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3758
3759         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3760         ring_size = bp->rx_ring_size;
3761         bp->rx_agg_ring_size = 0;
3762         bp->rx_agg_nr_pages = 0;
3763
3764         if (bp->flags & BNXT_FLAG_TPA)
3765                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3766
3767         bp->flags &= ~BNXT_FLAG_JUMBO;
3768         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3769                 u32 jumbo_factor;
3770
3771                 bp->flags |= BNXT_FLAG_JUMBO;
3772                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3773                 if (jumbo_factor > agg_factor)
3774                         agg_factor = jumbo_factor;
3775         }
3776         if (agg_factor) {
3777                 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3778                         ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3779                         netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3780                                     bp->rx_ring_size, ring_size);
3781                         bp->rx_ring_size = ring_size;
3782                 }
3783                 agg_ring_size = ring_size * agg_factor;
3784
3785                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3786                                                         RX_DESC_CNT);
3787                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3788                         u32 tmp = agg_ring_size;
3789
3790                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3791                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3792                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3793                                     tmp, agg_ring_size);
3794                 }
3795                 bp->rx_agg_ring_size = agg_ring_size;
3796                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3797                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3798                 rx_space = rx_size + NET_SKB_PAD +
3799                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3800         }
3801
3802         bp->rx_buf_use_size = rx_size;
3803         bp->rx_buf_size = rx_space;
3804
3805         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3806         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3807
3808         ring_size = bp->tx_ring_size;
3809         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3810         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3811
3812         max_rx_cmpl = bp->rx_ring_size;
3813         /* MAX TPA needs to be added because TPA_START completions are
3814          * immediately recycled, so the TPA completions are not bound by
3815          * the RX ring size.
3816          */
3817         if (bp->flags & BNXT_FLAG_TPA)
3818                 max_rx_cmpl += bp->max_tpa;
3819         /* RX and TPA completions are 32-byte, all others are 16-byte */
3820         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3821         bp->cp_ring_size = ring_size;
3822
3823         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3824         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3825                 bp->cp_nr_pages = MAX_CP_PAGES;
3826                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3827                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3828                             ring_size, bp->cp_ring_size);
3829         }
3830         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3831         bp->cp_ring_mask = bp->cp_bit - 1;
3832 }
3833
3834 /* Changing allocation mode of RX rings.
3835  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3836  */
3837 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3838 {
3839         if (page_mode) {
3840                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3841                         return -EOPNOTSUPP;
3842                 bp->dev->max_mtu =
3843                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3844                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3845                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3846                 bp->rx_dir = DMA_BIDIRECTIONAL;
3847                 bp->rx_skb_func = bnxt_rx_page_skb;
3848                 /* Disable LRO or GRO_HW */
3849                 netdev_update_features(bp->dev);
3850         } else {
3851                 bp->dev->max_mtu = bp->max_mtu;
3852                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3853                 bp->rx_dir = DMA_FROM_DEVICE;
3854                 bp->rx_skb_func = bnxt_rx_skb;
3855         }
3856         return 0;
3857 }
3858
3859 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3860 {
3861         int i;
3862         struct bnxt_vnic_info *vnic;
3863         struct pci_dev *pdev = bp->pdev;
3864
3865         if (!bp->vnic_info)
3866                 return;
3867
3868         for (i = 0; i < bp->nr_vnics; i++) {
3869                 vnic = &bp->vnic_info[i];
3870
3871                 kfree(vnic->fw_grp_ids);
3872                 vnic->fw_grp_ids = NULL;
3873
3874                 kfree(vnic->uc_list);
3875                 vnic->uc_list = NULL;
3876
3877                 if (vnic->mc_list) {
3878                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3879                                           vnic->mc_list, vnic->mc_list_mapping);
3880                         vnic->mc_list = NULL;
3881                 }
3882
3883                 if (vnic->rss_table) {
3884                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3885                                           vnic->rss_table,
3886                                           vnic->rss_table_dma_addr);
3887                         vnic->rss_table = NULL;
3888                 }
3889
3890                 vnic->rss_hash_key = NULL;
3891                 vnic->flags = 0;
3892         }
3893 }
3894
3895 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3896 {
3897         int i, rc = 0, size;
3898         struct bnxt_vnic_info *vnic;
3899         struct pci_dev *pdev = bp->pdev;
3900         int max_rings;
3901
3902         for (i = 0; i < bp->nr_vnics; i++) {
3903                 vnic = &bp->vnic_info[i];
3904
3905                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3906                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3907
3908                         if (mem_size > 0) {
3909                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3910                                 if (!vnic->uc_list) {
3911                                         rc = -ENOMEM;
3912                                         goto out;
3913                                 }
3914                         }
3915                 }
3916
3917                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3918                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3919                         vnic->mc_list =
3920                                 dma_alloc_coherent(&pdev->dev,
3921                                                    vnic->mc_list_size,
3922                                                    &vnic->mc_list_mapping,
3923                                                    GFP_KERNEL);
3924                         if (!vnic->mc_list) {
3925                                 rc = -ENOMEM;
3926                                 goto out;
3927                         }
3928                 }
3929
3930                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3931                         goto vnic_skip_grps;
3932
3933                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3934                         max_rings = bp->rx_nr_rings;
3935                 else
3936                         max_rings = 1;
3937
3938                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3939                 if (!vnic->fw_grp_ids) {
3940                         rc = -ENOMEM;
3941                         goto out;
3942                 }
3943 vnic_skip_grps:
3944                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3945                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3946                         continue;
3947
3948                 /* Allocate rss table and hash key */
3949                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3950                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3951                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3952
3953                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3954                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3955                                                      vnic->rss_table_size,
3956                                                      &vnic->rss_table_dma_addr,
3957                                                      GFP_KERNEL);
3958                 if (!vnic->rss_table) {
3959                         rc = -ENOMEM;
3960                         goto out;
3961                 }
3962
3963                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3964                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3965         }
3966         return 0;
3967
3968 out:
3969         return rc;
3970 }
3971
3972 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3973 {
3974         struct bnxt_hwrm_wait_token *token;
3975
3976         dma_pool_destroy(bp->hwrm_dma_pool);
3977         bp->hwrm_dma_pool = NULL;
3978
3979         rcu_read_lock();
3980         hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
3981                 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
3982         rcu_read_unlock();
3983 }
3984
3985 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3986 {
3987         bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
3988                                             BNXT_HWRM_DMA_SIZE,
3989                                             BNXT_HWRM_DMA_ALIGN, 0);
3990         if (!bp->hwrm_dma_pool)
3991                 return -ENOMEM;
3992
3993         INIT_HLIST_HEAD(&bp->hwrm_pending_list);
3994
3995         return 0;
3996 }
3997
3998 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3999 {
4000         kfree(stats->hw_masks);
4001         stats->hw_masks = NULL;
4002         kfree(stats->sw_stats);
4003         stats->sw_stats = NULL;
4004         if (stats->hw_stats) {
4005                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4006                                   stats->hw_stats_map);
4007                 stats->hw_stats = NULL;
4008         }
4009 }
4010
4011 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4012                                 bool alloc_masks)
4013 {
4014         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4015                                              &stats->hw_stats_map, GFP_KERNEL);
4016         if (!stats->hw_stats)
4017                 return -ENOMEM;
4018
4019         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4020         if (!stats->sw_stats)
4021                 goto stats_mem_err;
4022
4023         if (alloc_masks) {
4024                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4025                 if (!stats->hw_masks)
4026                         goto stats_mem_err;
4027         }
4028         return 0;
4029
4030 stats_mem_err:
4031         bnxt_free_stats_mem(bp, stats);
4032         return -ENOMEM;
4033 }
4034
4035 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4036 {
4037         int i;
4038
4039         for (i = 0; i < count; i++)
4040                 mask_arr[i] = mask;
4041 }
4042
4043 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4044 {
4045         int i;
4046
4047         for (i = 0; i < count; i++)
4048                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4049 }
4050
4051 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4052                                     struct bnxt_stats_mem *stats)
4053 {
4054         struct hwrm_func_qstats_ext_output *resp;
4055         struct hwrm_func_qstats_ext_input *req;
4056         __le64 *hw_masks;
4057         int rc;
4058
4059         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4060             !(bp->flags & BNXT_FLAG_CHIP_P5))
4061                 return -EOPNOTSUPP;
4062
4063         rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4064         if (rc)
4065                 return rc;
4066
4067         req->fid = cpu_to_le16(0xffff);
4068         req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4069
4070         resp = hwrm_req_hold(bp, req);
4071         rc = hwrm_req_send(bp, req);
4072         if (!rc) {
4073                 hw_masks = &resp->rx_ucast_pkts;
4074                 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4075         }
4076         hwrm_req_drop(bp, req);
4077         return rc;
4078 }
4079
4080 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4081 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4082
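/* Determine the valid width of each hardware counter.  Newer firmware
 * reports per-counter masks via HWRM_FUNC_QSTATS_EXT and the port
 * qstats "counter mask" flags; otherwise fall back to fixed widths:
 * 48 bits for P5 ring counters, 40 bits for port counters, or all ones.
 */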
4083 static void bnxt_init_stats(struct bnxt *bp)
4084 {
4085         struct bnxt_napi *bnapi = bp->bnapi[0];
4086         struct bnxt_cp_ring_info *cpr;
4087         struct bnxt_stats_mem *stats;
4088         __le64 *rx_stats, *tx_stats;
4089         int rc, rx_count, tx_count;
4090         u64 *rx_masks, *tx_masks;
4091         u64 mask;
4092         u8 flags;
4093
4094         cpr = &bnapi->cp_ring;
4095         stats = &cpr->stats;
4096         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4097         if (rc) {
4098                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4099                         mask = (1ULL << 48) - 1;
4100                 else
4101                         mask = -1ULL;
4102                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4103         }
4104         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4105                 stats = &bp->port_stats;
4106                 rx_stats = stats->hw_stats;
4107                 rx_masks = stats->hw_masks;
4108                 rx_count = sizeof(struct rx_port_stats) / 8;
4109                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4110                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4111                 tx_count = sizeof(struct tx_port_stats) / 8;
4112
4113                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4114                 rc = bnxt_hwrm_port_qstats(bp, flags);
4115                 if (rc) {
4116                         mask = (1ULL << 40) - 1;
4117
4118                         bnxt_fill_masks(rx_masks, mask, rx_count);
4119                         bnxt_fill_masks(tx_masks, mask, tx_count);
4120                 } else {
4121                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4122                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4123                         bnxt_hwrm_port_qstats(bp, 0);
4124                 }
4125         }
4126         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4127                 stats = &bp->rx_port_stats_ext;
4128                 rx_stats = stats->hw_stats;
4129                 rx_masks = stats->hw_masks;
4130                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4131                 stats = &bp->tx_port_stats_ext;
4132                 tx_stats = stats->hw_stats;
4133                 tx_masks = stats->hw_masks;
4134                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4135
4136                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4137                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4138                 if (rc) {
4139                         mask = (1ULL << 40) - 1;
4140
4141                         bnxt_fill_masks(rx_masks, mask, rx_count);
4142                         if (tx_stats)
4143                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4144                 } else {
4145                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4146                         if (tx_stats)
4147                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4148                                                    tx_count);
4149                         bnxt_hwrm_port_qstats_ext(bp, 0);
4150                 }
4151         }
4152 }
4153
4154 static void bnxt_free_port_stats(struct bnxt *bp)
4155 {
4156         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4157         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4158
4159         bnxt_free_stats_mem(bp, &bp->port_stats);
4160         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4161         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4162 }
4163
4164 static void bnxt_free_ring_stats(struct bnxt *bp)
4165 {
4166         int i;
4167
4168         if (!bp->bnapi)
4169                 return;
4170
4171         for (i = 0; i < bp->cp_nr_rings; i++) {
4172                 struct bnxt_napi *bnapi = bp->bnapi[i];
4173                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4174
4175                 bnxt_free_stats_mem(bp, &cpr->stats);
4176         }
4177 }
4178
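/* Allocate the per-ring statistics buffers and, on the PF (except the 58700
 * chip), the port statistics buffers.  Extended RX/TX port statistics are
 * optional and only allocated when the firmware spec or capability flags
 * indicate support; allocation failures for them are not fatal.
 */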
4179 static int bnxt_alloc_stats(struct bnxt *bp)
4180 {
4181         u32 size, i;
4182         int rc;
4183
4184         size = bp->hw_ring_stats_size;
4185
4186         for (i = 0; i < bp->cp_nr_rings; i++) {
4187                 struct bnxt_napi *bnapi = bp->bnapi[i];
4188                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4189
4190                 cpr->stats.len = size;
4191                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4192                 if (rc)
4193                         return rc;
4194
4195                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4196         }
4197
4198         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4199                 return 0;
4200
4201         if (bp->port_stats.hw_stats)
4202                 goto alloc_ext_stats;
4203
4204         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4205         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4206         if (rc)
4207                 return rc;
4208
4209         bp->flags |= BNXT_FLAG_PORT_STATS;
4210
4211 alloc_ext_stats:
4212         /* Display extended statistics only if FW supports them */
4213         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4214                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4215                         return 0;
4216
4217         if (bp->rx_port_stats_ext.hw_stats)
4218                 goto alloc_tx_ext_stats;
4219
4220         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4221         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4222         /* Extended stats are optional */
4223         if (rc)
4224                 return 0;
4225
4226 alloc_tx_ext_stats:
4227         if (bp->tx_port_stats_ext.hw_stats)
4228                 return 0;
4229
4230         if (bp->hwrm_spec_code >= 0x10902 ||
4231             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4232                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4233                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4234                 /* Extended stats are optional */
4235                 if (rc)
4236                         return 0;
4237         }
4238         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4239         return 0;
4240 }
4241
4242 static void bnxt_clear_ring_indices(struct bnxt *bp)
4243 {
4244         int i;
4245
4246         if (!bp->bnapi)
4247                 return;
4248
4249         for (i = 0; i < bp->cp_nr_rings; i++) {
4250                 struct bnxt_napi *bnapi = bp->bnapi[i];
4251                 struct bnxt_cp_ring_info *cpr;
4252                 struct bnxt_rx_ring_info *rxr;
4253                 struct bnxt_tx_ring_info *txr;
4254
4255                 if (!bnapi)
4256                         continue;
4257
4258                 cpr = &bnapi->cp_ring;
4259                 cpr->cp_raw_cons = 0;
4260
4261                 txr = bnapi->tx_ring;
4262                 if (txr) {
4263                         txr->tx_prod = 0;
4264                         txr->tx_cons = 0;
4265                 }
4266
4267                 rxr = bnapi->rx_ring;
4268                 if (rxr) {
4269                         rxr->rx_prod = 0;
4270                         rxr->rx_agg_prod = 0;
4271                         rxr->rx_sw_agg_prod = 0;
4272                         rxr->rx_next_cons = 0;
4273                 }
4274         }
4275 }
4276
4277 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4278 {
4279 #ifdef CONFIG_RFS_ACCEL
4280         int i;
4281
4282         /* We are under rtnl_lock and all our NAPIs have been disabled.
4283          * It is now safe to delete the hash table.
4284          */
4285         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4286                 struct hlist_head *head;
4287                 struct hlist_node *tmp;
4288                 struct bnxt_ntuple_filter *fltr;
4289
4290                 head = &bp->ntp_fltr_hash_tbl[i];
4291                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4292                         hlist_del(&fltr->hash);
4293                         kfree(fltr);
4294                 }
4295         }
4296         if (irq_reinit) {
4297                 kfree(bp->ntp_fltr_bmap);
4298                 bp->ntp_fltr_bmap = NULL;
4299         }
4300         bp->ntp_fltr_count = 0;
4301 #endif
4302 }
4303
4304 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4305 {
4306 #ifdef CONFIG_RFS_ACCEL
4307         int i, rc = 0;
4308
4309         if (!(bp->flags & BNXT_FLAG_RFS))
4310                 return 0;
4311
4312         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4313                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4314
4315         bp->ntp_fltr_count = 0;
4316         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4317                                     sizeof(long),
4318                                     GFP_KERNEL);
4319
4320         if (!bp->ntp_fltr_bmap)
4321                 rc = -ENOMEM;
4322
4323         return rc;
4324 #else
4325         return 0;
4326 #endif
4327 }
4328
4329 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4330 {
4331         bnxt_free_vnic_attributes(bp);
4332         bnxt_free_tx_rings(bp);
4333         bnxt_free_rx_rings(bp);
4334         bnxt_free_cp_rings(bp);
4335         bnxt_free_all_cp_arrays(bp);
4336         bnxt_free_ntp_fltrs(bp, irq_re_init);
4337         if (irq_re_init) {
4338                 bnxt_free_ring_stats(bp);
4339                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4340                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4341                         bnxt_free_port_stats(bp);
4342                 bnxt_free_ring_grps(bp);
4343                 bnxt_free_vnics(bp);
4344                 kfree(bp->tx_ring_map);
4345                 bp->tx_ring_map = NULL;
4346                 kfree(bp->tx_ring);
4347                 bp->tx_ring = NULL;
4348                 kfree(bp->rx_ring);
4349                 bp->rx_ring = NULL;
4350                 kfree(bp->bnapi);
4351                 bp->bnapi = NULL;
4352         } else {
4353                 bnxt_clear_ring_indices(bp);
4354         }
4355 }
4356
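/* Allocate all driver data structures.  With irq_re_init, the bnxt_napi
 * array, RX/TX ring info arrays, statistics, ntuple filter table and vnics
 * are (re)allocated; otherwise only the completion ring arrays, ring
 * descriptor memory and vnic attributes are set up again.  On any failure
 * everything is torn down through bnxt_free_mem().
 */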
4357 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4358 {
4359         int i, j, rc, size, arr_size;
4360         void *bnapi;
4361
4362         if (irq_re_init) {
4363                 /* Allocate bnapi mem pointer array and mem block for
4364                  * all queues
4365                  */
4366                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4367                                 bp->cp_nr_rings);
4368                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4369                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4370                 if (!bnapi)
4371                         return -ENOMEM;
4372
4373                 bp->bnapi = bnapi;
4374                 bnapi += arr_size;
4375                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4376                         bp->bnapi[i] = bnapi;
4377                         bp->bnapi[i]->index = i;
4378                         bp->bnapi[i]->bp = bp;
4379                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4380                                 struct bnxt_cp_ring_info *cpr =
4381                                         &bp->bnapi[i]->cp_ring;
4382
4383                                 cpr->cp_ring_struct.ring_mem.flags =
4384                                         BNXT_RMEM_RING_PTE_FLAG;
4385                         }
4386                 }
4387
4388                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4389                                       sizeof(struct bnxt_rx_ring_info),
4390                                       GFP_KERNEL);
4391                 if (!bp->rx_ring)
4392                         return -ENOMEM;
4393
4394                 for (i = 0; i < bp->rx_nr_rings; i++) {
4395                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4396
4397                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4398                                 rxr->rx_ring_struct.ring_mem.flags =
4399                                         BNXT_RMEM_RING_PTE_FLAG;
4400                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4401                                         BNXT_RMEM_RING_PTE_FLAG;
4402                         }
4403                         rxr->bnapi = bp->bnapi[i];
4404                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4405                 }
4406
4407                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4408                                       sizeof(struct bnxt_tx_ring_info),
4409                                       GFP_KERNEL);
4410                 if (!bp->tx_ring)
4411                         return -ENOMEM;
4412
4413                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4414                                           GFP_KERNEL);
4415
4416                 if (!bp->tx_ring_map)
4417                         return -ENOMEM;
4418
4419                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4420                         j = 0;
4421                 else
4422                         j = bp->rx_nr_rings;
4423
4424                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4425                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4426
4427                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4428                                 txr->tx_ring_struct.ring_mem.flags =
4429                                         BNXT_RMEM_RING_PTE_FLAG;
4430                         txr->bnapi = bp->bnapi[j];
4431                         bp->bnapi[j]->tx_ring = txr;
4432                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4433                         if (i >= bp->tx_nr_rings_xdp) {
4434                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4435                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4436                         } else {
4437                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4438                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4439                         }
4440                 }
4441
4442                 rc = bnxt_alloc_stats(bp);
4443                 if (rc)
4444                         goto alloc_mem_err;
4445                 bnxt_init_stats(bp);
4446
4447                 rc = bnxt_alloc_ntp_fltrs(bp);
4448                 if (rc)
4449                         goto alloc_mem_err;
4450
4451                 rc = bnxt_alloc_vnics(bp);
4452                 if (rc)
4453                         goto alloc_mem_err;
4454         }
4455
4456         rc = bnxt_alloc_all_cp_arrays(bp);
4457         if (rc)
4458                 goto alloc_mem_err;
4459
4460         bnxt_init_ring_struct(bp);
4461
4462         rc = bnxt_alloc_rx_rings(bp);
4463         if (rc)
4464                 goto alloc_mem_err;
4465
4466         rc = bnxt_alloc_tx_rings(bp);
4467         if (rc)
4468                 goto alloc_mem_err;
4469
4470         rc = bnxt_alloc_cp_rings(bp);
4471         if (rc)
4472                 goto alloc_mem_err;
4473
4474         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4475                                   BNXT_VNIC_UCAST_FLAG;
4476         rc = bnxt_alloc_vnic_attributes(bp);
4477         if (rc)
4478                 goto alloc_mem_err;
4479         return 0;
4480
4481 alloc_mem_err:
4482         bnxt_free_mem(bp, true);
4483         return rc;
4484 }
4485
4486 static void bnxt_disable_int(struct bnxt *bp)
4487 {
4488         int i;
4489
4490         if (!bp->bnapi)
4491                 return;
4492
4493         for (i = 0; i < bp->cp_nr_rings; i++) {
4494                 struct bnxt_napi *bnapi = bp->bnapi[i];
4495                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4496                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4497
4498                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4499                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4500         }
4501 }
4502
4503 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4504 {
4505         struct bnxt_napi *bnapi = bp->bnapi[n];
4506         struct bnxt_cp_ring_info *cpr;
4507
4508         cpr = &bnapi->cp_ring;
4509         return cpr->cp_ring_struct.map_idx;
4510 }
4511
4512 static void bnxt_disable_int_sync(struct bnxt *bp)
4513 {
4514         int i;
4515
4516         if (!bp->irq_tbl)
4517                 return;
4518
4519         atomic_inc(&bp->intr_sem);
4520
4521         bnxt_disable_int(bp);
4522         for (i = 0; i < bp->cp_nr_rings; i++) {
4523                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4524
4525                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4526         }
4527 }
4528
4529 static void bnxt_enable_int(struct bnxt *bp)
4530 {
4531         int i;
4532
4533         atomic_set(&bp->intr_sem, 0);
4534         for (i = 0; i < bp->cp_nr_rings; i++) {
4535                 struct bnxt_napi *bnapi = bp->bnapi[i];
4536                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4537
4538                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4539         }
4540 }
4541
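/* Register the driver with the firmware: advertise OS type and driver
 * version, hot reset / error recovery support when the firmware is capable,
 * the VF commands the PF is willing to forward, and the async events the
 * driver wants delivered.  With async_only, only the async event forwarding
 * bitmap is updated.
 */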
4542 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4543                             bool async_only)
4544 {
4545         DECLARE_BITMAP(async_events_bmap, 256);
4546         u32 *events = (u32 *)async_events_bmap;
4547         struct hwrm_func_drv_rgtr_output *resp;
4548         struct hwrm_func_drv_rgtr_input *req;
4549         u32 flags;
4550         int rc, i;
4551
4552         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4553         if (rc)
4554                 return rc;
4555
4556         req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4557                                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4558                                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4559
4560         req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4561         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4562         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4563                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4564         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4565                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4566                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4567         req->flags = cpu_to_le32(flags);
4568         req->ver_maj_8b = DRV_VER_MAJ;
4569         req->ver_min_8b = DRV_VER_MIN;
4570         req->ver_upd_8b = DRV_VER_UPD;
4571         req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4572         req->ver_min = cpu_to_le16(DRV_VER_MIN);
4573         req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4574
4575         if (BNXT_PF(bp)) {
4576                 u32 data[8];
4577                 int i;
4578
4579                 memset(data, 0, sizeof(data));
4580                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4581                         u16 cmd = bnxt_vf_req_snif[i];
4582                         unsigned int bit, idx;
4583
4584                         idx = cmd / 32;
4585                         bit = cmd % 32;
4586                         data[idx] |= 1 << bit;
4587                 }
4588
4589                 for (i = 0; i < 8; i++)
4590                         req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4591
4592                 req->enables |=
4593                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4594         }
4595
4596         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4597                 req->flags |= cpu_to_le32(
4598                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4599
4600         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4601         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4602                 u16 event_id = bnxt_async_events_arr[i];
4603
4604                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4605                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4606                         continue;
4607                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4608         }
4609         if (bmap && bmap_size) {
4610                 for (i = 0; i < bmap_size; i++) {
4611                         if (test_bit(i, bmap))
4612                                 __set_bit(i, async_events_bmap);
4613                 }
4614         }
4615         for (i = 0; i < 8; i++)
4616                 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4617
4618         if (async_only)
4619                 req->enables =
4620                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4621
4622         resp = hwrm_req_hold(bp, req);
4623         rc = hwrm_req_send(bp, req);
4624         if (!rc) {
4625                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4626                 if (resp->flags &
4627                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4628                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4629         }
4630         hwrm_req_drop(bp, req);
4631         return rc;
4632 }
4633
4634 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4635 {
4636         struct hwrm_func_drv_unrgtr_input *req;
4637         int rc;
4638
4639         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4640                 return 0;
4641
4642         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4643         if (rc)
4644                 return rc;
4645         return hwrm_req_send(bp, req);
4646 }
4647
4648 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4649 {
4650         struct hwrm_tunnel_dst_port_free_input *req;
4651         int rc;
4652
4653         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4654             bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4655                 return 0;
4656         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4657             bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4658                 return 0;
4659
4660         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4661         if (rc)
4662                 return rc;
4663
4664         req->tunnel_type = tunnel_type;
4665
4666         switch (tunnel_type) {
4667         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4668                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4669                 bp->vxlan_port = 0;
4670                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4671                 break;
4672         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4673                 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4674                 bp->nge_port = 0;
4675                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4676                 break;
4677         default:
4678                 break;
4679         }
4680
4681         rc = hwrm_req_send(bp, req);
4682         if (rc)
4683                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4684                            rc);
4685         return rc;
4686 }
4687
4688 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4689                                            u8 tunnel_type)
4690 {
4691         struct hwrm_tunnel_dst_port_alloc_output *resp;
4692         struct hwrm_tunnel_dst_port_alloc_input *req;
4693         int rc;
4694
4695         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4696         if (rc)
4697                 return rc;
4698
4699         req->tunnel_type = tunnel_type;
4700         req->tunnel_dst_port_val = port;
4701
4702         resp = hwrm_req_hold(bp, req);
4703         rc = hwrm_req_send(bp, req);
4704         if (rc) {
4705                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4706                            rc);
4707                 goto err_out;
4708         }
4709
4710         switch (tunnel_type) {
4711         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4712                 bp->vxlan_port = port;
4713                 bp->vxlan_fw_dst_port_id =
4714                         le16_to_cpu(resp->tunnel_dst_port_id);
4715                 break;
4716         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4717                 bp->nge_port = port;
4718                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4719                 break;
4720         default:
4721                 break;
4722         }
4723
4724 err_out:
4725         hwrm_req_drop(bp, req);
4726         return rc;
4727 }
4728
4729 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4730 {
4731         struct hwrm_cfa_l2_set_rx_mask_input *req;
4732         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4733         int rc;
4734
4735         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4736         if (rc)
4737                 return rc;
4738
4739         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4740         req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4741         req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4742         req->mask = cpu_to_le32(vnic->rx_mask);
4743         return hwrm_req_send_silent(bp, req);
4744 }
4745
4746 #ifdef CONFIG_RFS_ACCEL
4747 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4748                                             struct bnxt_ntuple_filter *fltr)
4749 {
4750         struct hwrm_cfa_ntuple_filter_free_input *req;
4751         int rc;
4752
4753         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4754         if (rc)
4755                 return rc;
4756
4757         req->ntuple_filter_id = fltr->filter_id;
4758         return hwrm_req_send(bp, req);
4759 }
4760
4761 #define BNXT_NTP_FLTR_FLAGS                                     \
4762         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4763          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4764          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4765          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4766          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4767          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4768          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4769          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4770          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4771          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4772          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4773          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4774          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4775          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4776
4777 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4778                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4779
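/* Program an ntuple (RFS) filter from the flow keys: exact match on source
 * and destination address and port for IPv4 or IPv6.  Depending on firmware
 * capability the destination is either the RX ring index directly or the
 * per-queue vnic created for RFS.
 */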
4780 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4781                                              struct bnxt_ntuple_filter *fltr)
4782 {
4783         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4784         struct hwrm_cfa_ntuple_filter_alloc_input *req;
4785         struct flow_keys *keys = &fltr->fkeys;
4786         struct bnxt_vnic_info *vnic;
4787         u32 flags = 0;
4788         int rc;
4789
4790         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4791         if (rc)
4792                 return rc;
4793
4794         req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4795
4796         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4797                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4798                 req->dst_id = cpu_to_le16(fltr->rxq);
4799         } else {
4800                 vnic = &bp->vnic_info[fltr->rxq + 1];
4801                 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4802         }
4803         req->flags = cpu_to_le32(flags);
4804         req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4805
4806         req->ethertype = htons(ETH_P_IP);
4807         memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4808         req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4809         req->ip_protocol = keys->basic.ip_proto;
4810
4811         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4812                 int i;
4813
4814                 req->ethertype = htons(ETH_P_IPV6);
4815                 req->ip_addr_type =
4816                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4817                 *(struct in6_addr *)&req->src_ipaddr[0] =
4818                         keys->addrs.v6addrs.src;
4819                 *(struct in6_addr *)&req->dst_ipaddr[0] =
4820                         keys->addrs.v6addrs.dst;
4821                 for (i = 0; i < 4; i++) {
4822                         req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4823                         req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4824                 }
4825         } else {
4826                 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4827                 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4828                 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4829                 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4830         }
4831         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4832                 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4833                 req->tunnel_type =
4834                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4835         }
4836
4837         req->src_port = keys->ports.src;
4838         req->src_port_mask = cpu_to_be16(0xffff);
4839         req->dst_port = keys->ports.dst;
4840         req->dst_port_mask = cpu_to_be16(0xffff);
4841
4842         resp = hwrm_req_hold(bp, req);
4843         rc = hwrm_req_send(bp, req);
4844         if (!rc)
4845                 fltr->filter_id = resp->ntuple_filter_id;
4846         hwrm_req_drop(bp, req);
4847         return rc;
4848 }
4849 #endif
4850
4851 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4852                                      u8 *mac_addr)
4853 {
4854         struct hwrm_cfa_l2_filter_alloc_output *resp;
4855         struct hwrm_cfa_l2_filter_alloc_input *req;
4856         int rc;
4857
4858         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4859         if (rc)
4860                 return rc;
4861
4862         req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4863         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4864                 req->flags |=
4865                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4866         req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4867         req->enables =
4868                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4869                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4870                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4871         memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4872         req->l2_addr_mask[0] = 0xff;
4873         req->l2_addr_mask[1] = 0xff;
4874         req->l2_addr_mask[2] = 0xff;
4875         req->l2_addr_mask[3] = 0xff;
4876         req->l2_addr_mask[4] = 0xff;
4877         req->l2_addr_mask[5] = 0xff;
4878
4879         resp = hwrm_req_hold(bp, req);
4880         rc = hwrm_req_send(bp, req);
4881         if (!rc)
4882                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4883                                                         resp->l2_filter_id;
4884         hwrm_req_drop(bp, req);
4885         return rc;
4886 }
4887
4888 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4889 {
4890         struct hwrm_cfa_l2_filter_free_input *req;
4891         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4892         int rc;
4893
4894         /* Any associated ntuple filters will also be cleared by firmware. */
4895         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4896         if (rc)
4897                 return rc;
4898         hwrm_req_hold(bp, req);
4899         for (i = 0; i < num_of_vnics; i++) {
4900                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4901
4902                 for (j = 0; j < vnic->uc_filter_count; j++) {
4903                         req->l2_filter_id = vnic->fw_l2_filter_id[j];
4904
4905                         rc = hwrm_req_send(bp, req);
4906                 }
4907                 vnic->uc_filter_count = 0;
4908         }
4909         hwrm_req_drop(bp, req);
4910         return rc;
4911 }
4912
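/* Configure TPA (hardware GRO/LRO aggregation) for a vnic.  When enabling,
 * the maximum number of aggregation segments is derived from the MTU and
 * BNXT_RX_PAGE_SIZE so that an aggregated packet still fits in
 * MAX_SKB_FRAGS fragments; P5 chips use MAX_TPA_SEGS_P5 and the
 * firmware-reported bp->max_tpa instead.
 */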
4913 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4914 {
4915         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4916         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4917         struct hwrm_vnic_tpa_cfg_input *req;
4918         int rc;
4919
4920         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4921                 return 0;
4922
4923         rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4924         if (rc)
4925                 return rc;
4926
4927         if (tpa_flags) {
4928                 u16 mss = bp->dev->mtu - 40;
4929                 u32 nsegs, n, segs = 0, flags;
4930
4931                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4932                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4933                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4934                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4935                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4936                 if (tpa_flags & BNXT_FLAG_GRO)
4937                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4938
4939                 req->flags = cpu_to_le32(flags);
4940
4941                 req->enables =
4942                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4943                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4944                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4945
4946                 /* The number of segs is in log2 units, and the first
4947                  * packet is not included in these units.
4948                  */
4949                 if (mss <= BNXT_RX_PAGE_SIZE) {
4950                         n = BNXT_RX_PAGE_SIZE / mss;
4951                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4952                 } else {
4953                         n = mss / BNXT_RX_PAGE_SIZE;
4954                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4955                                 n++;
4956                         nsegs = (MAX_SKB_FRAGS - n) / n;
4957                 }
4958
4959                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4960                         segs = MAX_TPA_SEGS_P5;
4961                         max_aggs = bp->max_tpa;
4962                 } else {
4963                         segs = ilog2(nsegs);
4964                 }
4965                 req->max_agg_segs = cpu_to_le16(segs);
4966                 req->max_aggs = cpu_to_le16(max_aggs);
4967
4968                 req->min_agg_len = cpu_to_le32(512);
4969         }
4970         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4971
4972         return hwrm_req_send(bp, req);
4973 }
4974
4975 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4976 {
4977         struct bnxt_ring_grp_info *grp_info;
4978
4979         grp_info = &bp->grp_info[ring->grp_idx];
4980         return grp_info->cp_fw_ring_id;
4981 }
4982
4983 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4984 {
4985         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4986                 struct bnxt_napi *bnapi = rxr->bnapi;
4987                 struct bnxt_cp_ring_info *cpr;
4988
4989                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4990                 return cpr->cp_ring_struct.fw_ring_id;
4991         } else {
4992                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4993         }
4994 }
4995
4996 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4997 {
4998         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4999                 struct bnxt_napi *bnapi = txr->bnapi;
5000                 struct bnxt_cp_ring_info *cpr;
5001
5002                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5003                 return cpr->cp_ring_struct.fw_ring_id;
5004         } else {
5005                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5006         }
5007 }
5008
5009 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5010 {
5011         int entries;
5012
5013         if (bp->flags & BNXT_FLAG_CHIP_P5)
5014                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5015         else
5016                 entries = HW_HASH_INDEX_SIZE;
5017
5018         bp->rss_indir_tbl_entries = entries;
5019         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5020                                           GFP_KERNEL);
5021         if (!bp->rss_indir_tbl)
5022                 return -ENOMEM;
5023         return 0;
5024 }
5025
5026 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5027 {
5028         u16 max_rings, max_entries, pad, i;
5029
5030         if (!bp->rx_nr_rings)
5031                 return;
5032
5033         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5034                 max_rings = bp->rx_nr_rings - 1;
5035         else
5036                 max_rings = bp->rx_nr_rings;
5037
5038         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5039
5040         for (i = 0; i < max_entries; i++)
5041                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5042
5043         pad = bp->rss_indir_tbl_entries - max_entries;
5044         if (pad)
5045                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5046 }
5047
5048 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5049 {
5050         u16 i, tbl_size, max_ring = 0;
5051
5052         if (!bp->rss_indir_tbl)
5053                 return 0;
5054
5055         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5056         for (i = 0; i < tbl_size; i++)
5057                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5058         return max_ring;
5059 }
5060
5061 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5062 {
5063         if (bp->flags & BNXT_FLAG_CHIP_P5)
5064                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5065         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5066                 return 2;
5067         return 1;
5068 }
5069
5070 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5071 {
5072         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5073         u16 i, j;
5074
5075         /* Fill the RSS indirection table with ring group ids */
5076         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5077                 if (!no_rss)
5078                         j = bp->rss_indir_tbl[i];
5079                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5080         }
5081 }
5082
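/* On P5 chips each RSS indirection entry is a pair of ring IDs: the RX
 * ring's firmware ring ID followed by the ID of its completion ring.
 */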
5083 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5084                                       struct bnxt_vnic_info *vnic)
5085 {
5086         __le16 *ring_tbl = vnic->rss_table;
5087         struct bnxt_rx_ring_info *rxr;
5088         u16 tbl_size, i;
5089
5090         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5091
5092         for (i = 0; i < tbl_size; i++) {
5093                 u16 ring_id, j;
5094
5095                 j = bp->rss_indir_tbl[i];
5096                 rxr = &bp->rx_ring[j];
5097
5098                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5099                 *ring_tbl++ = cpu_to_le16(ring_id);
5100                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5101                 *ring_tbl++ = cpu_to_le16(ring_id);
5102         }
5103 }
5104
5105 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5106 {
5107         if (bp->flags & BNXT_FLAG_CHIP_P5)
5108                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5109         else
5110                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5111 }
5112
5113 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5114 {
5115         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5116         struct hwrm_vnic_rss_cfg_input *req;
5117         int rc;
5118
5119         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5120             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5121                 return 0;
5122
5123         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5124         if (rc)
5125                 return rc;
5126
5127         if (set_rss) {
5128                 bnxt_fill_hw_rss_tbl(bp, vnic);
5129                 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5130                 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5131                 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5132                 req->hash_key_tbl_addr =
5133                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5134         }
5135         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5136         return hwrm_req_send(bp, req);
5137 }
5138
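/* P5 chips use one RSS context per group of BNXT_RSS_TABLE_ENTRIES_P5 RX
 * rings.  Program each context with its slice of the ring table by sending
 * one VNIC_RSS_CFG request per context, advancing the ring table DMA
 * address by BNXT_RSS_TABLE_SIZE_P5 each time.
 */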
5139 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5140 {
5141         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5142         struct hwrm_vnic_rss_cfg_input *req;
5143         dma_addr_t ring_tbl_map;
5144         u32 i, nr_ctxs;
5145         int rc;
5146
5147         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5148         if (rc)
5149                 return rc;
5150
5151         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5152         if (!set_rss)
5153                 return hwrm_req_send(bp, req);
5154
5155         bnxt_fill_hw_rss_tbl(bp, vnic);
5156         req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5157         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5158         req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5159         ring_tbl_map = vnic->rss_table_dma_addr;
5160         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5161
5162         hwrm_req_hold(bp, req);
5163         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5164                 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5165                 req->ring_table_pair_index = i;
5166                 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5167                 rc = hwrm_req_send(bp, req);
5168                 if (rc)
5169                         goto exit;
5170         }
5171
5172 exit:
5173         hwrm_req_drop(bp, req);
5174         return rc;
5175 }
5176
5177 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5178 {
5179         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5180         struct hwrm_vnic_plcmodes_cfg_input *req;
5181         int rc;
5182
5183         rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5184         if (rc)
5185                 return rc;
5186
5187         req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5188                                  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5189                                  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5190         req->enables =
5191                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5192                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5193         /* thresholds not implemented in firmware yet */
5194         req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5195         req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5196         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5197         return hwrm_req_send(bp, req);
5198 }
5199
5200 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5201                                         u16 ctx_idx)
5202 {
5203         struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5204
5205         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5206                 return;
5207
5208         req->rss_cos_lb_ctx_id =
5209                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5210
5211         hwrm_req_send(bp, req);
5212         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5213 }
5214
5215 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5216 {
5217         int i, j;
5218
5219         for (i = 0; i < bp->nr_vnics; i++) {
5220                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5221
5222                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5223                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5224                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5225                 }
5226         }
5227         bp->rsscos_nr_ctxs = 0;
5228 }
5229
5230 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5231 {
5232         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5233         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5234         int rc;
5235
5236         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5237         if (rc)
5238                 return rc;
5239
5240         resp = hwrm_req_hold(bp, req);
5241         rc = hwrm_req_send(bp, req);
5242         if (!rc)
5243                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5244                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5245         hwrm_req_drop(bp, req);
5246
5247         return rc;
5248 }
5249
5250 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5251 {
5252         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5253                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5254         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5255 }
5256
5257 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5258 {
5259         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5260         struct hwrm_vnic_cfg_input *req;
5261         unsigned int ring = 0, grp_idx;
5262         u16 def_vlan = 0;
5263         int rc;
5264
5265         rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5266         if (rc)
5267                 return rc;
5268
5269         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5270                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5271
5272                 req->default_rx_ring_id =
5273                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5274                 req->default_cmpl_ring_id =
5275                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5276                 req->enables =
5277                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5278                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5279                 goto vnic_mru;
5280         }
5281         req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5282         /* Only RSS is supported for now; TBD: COS & LB */
5283         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5284                 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5285                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5286                                            VNIC_CFG_REQ_ENABLES_MRU);
5287         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5288                 req->rss_rule =
5289                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5290                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5291                                            VNIC_CFG_REQ_ENABLES_MRU);
5292                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5293         } else {
5294                 req->rss_rule = cpu_to_le16(0xffff);
5295         }
5296
5297         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5298             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5299                 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5300                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5301         } else {
5302                 req->cos_rule = cpu_to_le16(0xffff);
5303         }
5304
5305         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5306                 ring = 0;
5307         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5308                 ring = vnic_id - 1;
5309         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5310                 ring = bp->rx_nr_rings - 1;
5311
5312         grp_idx = bp->rx_ring[ring].bnapi->index;
5313         req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5314         req->lb_rule = cpu_to_le16(0xffff);
5315 vnic_mru:
5316         req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5317
5318         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5319 #ifdef CONFIG_BNXT_SRIOV
5320         if (BNXT_VF(bp))
5321                 def_vlan = bp->vf.vlan;
5322 #endif
5323         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5324                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5325         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5326                 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5327
5328         return hwrm_req_send(bp, req);
5329 }
5330
5331 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5332 {
5333         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5334                 struct hwrm_vnic_free_input *req;
5335
5336                 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5337                         return;
5338
5339                 req->vnic_id =
5340                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5341
5342                 hwrm_req_send(bp, req);
5343                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5344         }
5345 }
5346
5347 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5348 {
5349         u16 i;
5350
5351         for (i = 0; i < bp->nr_vnics; i++)
5352                 bnxt_hwrm_vnic_free_one(bp, i);
5353 }
5354
5355 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5356                                 unsigned int start_rx_ring_idx,
5357                                 unsigned int nr_rings)
5358 {
5359         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5360         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5361         struct hwrm_vnic_alloc_output *resp;
5362         struct hwrm_vnic_alloc_input *req;
5363         int rc;
5364
5365         rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5366         if (rc)
5367                 return rc;
5368
5369         if (bp->flags & BNXT_FLAG_CHIP_P5)
5370                 goto vnic_no_ring_grps;
5371
5372         /* map ring groups to this vnic */
5373         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5374                 grp_idx = bp->rx_ring[i].bnapi->index;
5375                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5376                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5377                                    j, nr_rings);
5378                         break;
5379                 }
5380                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5381         }
5382
5383 vnic_no_ring_grps:
5384         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5385                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5386         if (vnic_id == 0)
5387                 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5388
5389         resp = hwrm_req_hold(bp, req);
5390         rc = hwrm_req_send(bp, req);
5391         if (!rc)
5392                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5393         hwrm_req_drop(bp, req);
5394         return rc;
5395 }
5396
5397 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5398 {
5399         struct hwrm_vnic_qcaps_output *resp;
5400         struct hwrm_vnic_qcaps_input *req;
5401         int rc;
5402
5403         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5404         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5405         if (bp->hwrm_spec_code < 0x10600)
5406                 return 0;
5407
5408         rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5409         if (rc)
5410                 return rc;
5411
5412         resp = hwrm_req_hold(bp, req);
5413         rc = hwrm_req_send(bp, req);
5414         if (!rc) {
5415                 u32 flags = le32_to_cpu(resp->flags);
5416
5417                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5418                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5419                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5420                 if (flags &
5421                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5422                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5423
5424                 /* Older P5 fw before EXT_HW_STATS support did not set
5425                  * VLAN_STRIP_CAP properly.
5426                  */
5427                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5428                     (BNXT_CHIP_P5_THOR(bp) &&
5429                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5430                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5431                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5432                 if (bp->max_tpa_v2) {
5433                         if (BNXT_CHIP_P5_THOR(bp))
5434                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5435                         else
5436                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5437                 }
5438         }
5439         hwrm_req_drop(bp, req);
5440         return rc;
5441 }
5442
5443 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5444 {
5445         struct hwrm_ring_grp_alloc_output *resp;
5446         struct hwrm_ring_grp_alloc_input *req;
5447         int rc;
5448         u16 i;
5449
5450         if (bp->flags & BNXT_FLAG_CHIP_P5)
5451                 return 0;
5452
5453         rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5454         if (rc)
5455                 return rc;
5456
5457         resp = hwrm_req_hold(bp, req);
5458         for (i = 0; i < bp->rx_nr_rings; i++) {
5459                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5460
5461                 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5462                 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5463                 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5464                 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5465
5466                 rc = hwrm_req_send(bp, req);
5467
5468                 if (rc)
5469                         break;
5470
5471                 bp->grp_info[grp_idx].fw_grp_id =
5472                         le32_to_cpu(resp->ring_group_id);
5473         }
5474         hwrm_req_drop(bp, req);
5475         return rc;
5476 }
5477
5478 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5479 {
5480         struct hwrm_ring_grp_free_input *req;
5481         u16 i;
5482
5483         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5484                 return;
5485
5486         if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5487                 return;
5488
5489         hwrm_req_hold(bp, req);
5490         for (i = 0; i < bp->cp_nr_rings; i++) {
5491                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5492                         continue;
5493                 req->ring_group_id =
5494                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5495
5496                 hwrm_req_send(bp, req);
5497                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5498         }
5499         hwrm_req_drop(bp, req);
5500 }
5501
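/* Build and send a HWRM_RING_ALLOC request for a TX, RX, RX aggregation,
 * completion or NQ ring, associating it with the appropriate completion
 * ring, statistics context or NQ as required by the ring type and chip.
 * The firmware ring ID from the response is saved in ring->fw_ring_id.
 */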
5502 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5503                                     struct bnxt_ring_struct *ring,
5504                                     u32 ring_type, u32 map_index)
5505 {
5506         struct hwrm_ring_alloc_output *resp;
5507         struct hwrm_ring_alloc_input *req;
5508         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5509         struct bnxt_ring_grp_info *grp_info;
5510         int rc, err = 0;
5511         u16 ring_id;
5512
5513         rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5514         if (rc)
5515                 goto exit;
5516
5517         req->enables = 0;
5518         if (rmem->nr_pages > 1) {
5519                 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5520                 /* Page size is in log2 units */
5521                 req->page_size = BNXT_PAGE_SHIFT;
5522                 req->page_tbl_depth = 1;
5523         } else {
5524                 req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5525         }
5526         req->fbo = 0;
5527         /* Association of ring index with doorbell index and MSIX number */
5528         req->logical_id = cpu_to_le16(map_index);
5529
5530         switch (ring_type) {
5531         case HWRM_RING_ALLOC_TX: {
5532                 struct bnxt_tx_ring_info *txr;
5533
5534                 txr = container_of(ring, struct bnxt_tx_ring_info,
5535                                    tx_ring_struct);
5536                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5537                 /* Association of transmit ring with completion ring */
5538                 grp_info = &bp->grp_info[ring->grp_idx];
5539                 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5540                 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5541                 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5542                 req->queue_id = cpu_to_le16(ring->queue_id);
5543                 break;
5544         }
5545         case HWRM_RING_ALLOC_RX:
5546                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5547                 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5548                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5549                         u16 flags = 0;
5550
5551                         /* Association of rx ring with stats context */
5552                         grp_info = &bp->grp_info[ring->grp_idx];
5553                         req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5554                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5555                         req->enables |= cpu_to_le32(
5556                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5557                         if (NET_IP_ALIGN == 2)
5558                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5559                         req->flags = cpu_to_le16(flags);
5560                 }
5561                 break;
5562         case HWRM_RING_ALLOC_AGG:
5563                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5564                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5565                         /* Association of agg ring with rx ring */
5566                         grp_info = &bp->grp_info[ring->grp_idx];
5567                         req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5568                         req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5569                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5570                         req->enables |= cpu_to_le32(
5571                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5572                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5573                 } else {
5574                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5575                 }
5576                 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5577                 break;
5578         case HWRM_RING_ALLOC_CMPL:
5579                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5580                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5581                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5582                         /* Association of cp ring with nq */
5583                         grp_info = &bp->grp_info[map_index];
5584                         req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5585                         req->cq_handle = cpu_to_le64(ring->handle);
5586                         req->enables |= cpu_to_le32(
5587                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5588                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5589                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5590                 }
5591                 break;
5592         case HWRM_RING_ALLOC_NQ:
5593                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5594                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5595                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5596                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5597                 break;
5598         default:
5599                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5600                            ring_type);
5601                 return -EINVAL;
5602         }
5603
5604         resp = hwrm_req_hold(bp, req);
5605         rc = hwrm_req_send(bp, req);
5606         err = le16_to_cpu(resp->error_code);
5607         ring_id = le16_to_cpu(resp->ring_id);
5608         hwrm_req_drop(bp, req);
5609
5610 exit:
5611         if (rc || err) {
5612                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5613                            ring_type, rc, err);
5614                 return -EIO;
5615         }
5616         ring->fw_ring_id = ring_id;
5617         return rc;
5618 }
5619
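/* Tell the firmware which completion ring should receive async event
 * notifications, using FUNC_CFG on the PF or FUNC_VF_CFG on a VF.
 */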
5620 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5621 {
5622         int rc;
5623
5624         if (BNXT_PF(bp)) {
5625                 struct hwrm_func_cfg_input *req;
5626
5627                 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5628                 if (rc)
5629                         return rc;
5630
5631                 req->fid = cpu_to_le16(0xffff);
5632                 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5633                 req->async_event_cr = cpu_to_le16(idx);
5634                 return hwrm_req_send(bp, req);
5635         } else {
5636                 struct hwrm_func_vf_cfg_input *req;
5637
5638                 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5639                 if (rc)
5640                         return rc;
5641
5642                 req->enables =
5643                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5644                 req->async_event_cr = cpu_to_le16(idx);
5645                 return hwrm_req_send(bp, req);
5646         }
5647 }
5648
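/* Set up the doorbell address and key for a ring.  P5 chips use 64-bit
 * doorbells at a fixed PF/VF offset; older chips use 32-bit doorbells
 * spaced 0x80 apart and indexed by map_idx.
 */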
5649 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5650                         u32 map_idx, u32 xid)
5651 {
5652         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5653                 if (BNXT_PF(bp))
5654                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5655                 else
5656                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5657                 switch (ring_type) {
5658                 case HWRM_RING_ALLOC_TX:
5659                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5660                         break;
5661                 case HWRM_RING_ALLOC_RX:
5662                 case HWRM_RING_ALLOC_AGG:
5663                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5664                         break;
5665                 case HWRM_RING_ALLOC_CMPL:
5666                         db->db_key64 = DBR_PATH_L2;
5667                         break;
5668                 case HWRM_RING_ALLOC_NQ:
5669                         db->db_key64 = DBR_PATH_L2;
5670                         break;
5671                 }
5672                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5673         } else {
5674                 db->doorbell = bp->bar1 + map_idx * 0x80;
5675                 switch (ring_type) {
5676                 case HWRM_RING_ALLOC_TX:
5677                         db->db_key32 = DB_KEY_TX;
5678                         break;
5679                 case HWRM_RING_ALLOC_RX:
5680                 case HWRM_RING_ALLOC_AGG:
5681                         db->db_key32 = DB_KEY_RX;
5682                         break;
5683                 case HWRM_RING_ALLOC_CMPL:
5684                         db->db_key32 = DB_KEY_CP;
5685                         break;
5686                 }
5687         }
5688 }
5689
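/* Allocate all firmware rings for the device: NQ or completion rings
 * first, then TX, RX and aggregation rings, writing the initial
 * doorbells as each ring is created.
 */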
5690 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5691 {
5692         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5693         int i, rc = 0;
5694         u32 type;
5695
5696         if (bp->flags & BNXT_FLAG_CHIP_P5)
5697                 type = HWRM_RING_ALLOC_NQ;
5698         else
5699                 type = HWRM_RING_ALLOC_CMPL;
5700         for (i = 0; i < bp->cp_nr_rings; i++) {
5701                 struct bnxt_napi *bnapi = bp->bnapi[i];
5702                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5703                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5704                 u32 map_idx = ring->map_idx;
5705                 unsigned int vector;
5706
5707                 vector = bp->irq_tbl[map_idx].vector;
5708                 disable_irq_nosync(vector);
5709                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5710                 if (rc) {
5711                         enable_irq(vector);
5712                         goto err_out;
5713                 }
5714                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5715                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5716                 enable_irq(vector);
5717                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5718
5719                 if (!i) {
5720                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5721                         if (rc)
5722                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5723                 }
5724         }
5725
5726         type = HWRM_RING_ALLOC_TX;
5727         for (i = 0; i < bp->tx_nr_rings; i++) {
5728                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5729                 struct bnxt_ring_struct *ring;
5730                 u32 map_idx;
5731
5732                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5733                         struct bnxt_napi *bnapi = txr->bnapi;
5734                         struct bnxt_cp_ring_info *cpr, *cpr2;
5735                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5736
5737                         cpr = &bnapi->cp_ring;
5738                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5739                         ring = &cpr2->cp_ring_struct;
5740                         ring->handle = BNXT_TX_HDL;
5741                         map_idx = bnapi->index;
5742                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5743                         if (rc)
5744                                 goto err_out;
5745                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5746                                     ring->fw_ring_id);
5747                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5748                 }
5749                 ring = &txr->tx_ring_struct;
5750                 map_idx = i;
5751                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5752                 if (rc)
5753                         goto err_out;
5754                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5755         }
5756
5757         type = HWRM_RING_ALLOC_RX;
5758         for (i = 0; i < bp->rx_nr_rings; i++) {
5759                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5760                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5761                 struct bnxt_napi *bnapi = rxr->bnapi;
5762                 u32 map_idx = bnapi->index;
5763
5764                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5765                 if (rc)
5766                         goto err_out;
5767                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5768                 /* If we have agg rings, post agg buffers first. */
5769                 if (!agg_rings)
5770                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5771                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5772                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5773                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5774                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5775                         struct bnxt_cp_ring_info *cpr2;
5776
5777                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5778                         ring = &cpr2->cp_ring_struct;
5779                         ring->handle = BNXT_RX_HDL;
5780                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5781                         if (rc)
5782                                 goto err_out;
5783                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5784                                     ring->fw_ring_id);
5785                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5786                 }
5787         }
5788
5789         if (agg_rings) {
5790                 type = HWRM_RING_ALLOC_AGG;
5791                 for (i = 0; i < bp->rx_nr_rings; i++) {
5792                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5793                         struct bnxt_ring_struct *ring =
5794                                                 &rxr->rx_agg_ring_struct;
5795                         u32 grp_idx = ring->grp_idx;
5796                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5797
5798                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5799                         if (rc)
5800                                 goto err_out;
5801
5802                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5803                                     ring->fw_ring_id);
5804                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5805                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5806                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5807                 }
5808         }
5809 err_out:
5810         return rc;
5811 }
5812
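/* Send HWRM_RING_FREE to the firmware for one ring; returns -EIO on
 * failure.  Skipped entirely when firmware access is not possible.
 */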
5813 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5814                                    struct bnxt_ring_struct *ring,
5815                                    u32 ring_type, int cmpl_ring_id)
5816 {
5817         struct hwrm_ring_free_output *resp;
5818         struct hwrm_ring_free_input *req;
5819         u16 error_code = 0;
5820         int rc;
5821
5822         if (BNXT_NO_FW_ACCESS(bp))
5823                 return 0;
5824
5825         rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5826         if (rc)
5827                 goto exit;
5828
5829         req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5830         req->ring_type = ring_type;
5831         req->ring_id = cpu_to_le16(ring->fw_ring_id);
5832
5833         resp = hwrm_req_hold(bp, req);
5834         rc = hwrm_req_send(bp, req);
5835         error_code = le16_to_cpu(resp->error_code);
5836         hwrm_req_drop(bp, req);
5837 exit:
5838         if (rc || error_code) {
5839                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5840                            ring_type, rc, error_code);
5841                 return -EIO;
5842         }
5843         return 0;
5844 }
5845
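/* Free all firmware rings: TX, RX and aggregation rings first, then
 * disable interrupts and free the completion/NQ rings.
 */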
5846 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5847 {
5848         u32 type;
5849         int i;
5850
5851         if (!bp->bnapi)
5852                 return;
5853
5854         for (i = 0; i < bp->tx_nr_rings; i++) {
5855                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5856                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5857
5858                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5859                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5860
5861                         hwrm_ring_free_send_msg(bp, ring,
5862                                                 RING_FREE_REQ_RING_TYPE_TX,
5863                                                 close_path ? cmpl_ring_id :
5864                                                 INVALID_HW_RING_ID);
5865                         ring->fw_ring_id = INVALID_HW_RING_ID;
5866                 }
5867         }
5868
5869         for (i = 0; i < bp->rx_nr_rings; i++) {
5870                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5871                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5872                 u32 grp_idx = rxr->bnapi->index;
5873
5874                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5875                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5876
5877                         hwrm_ring_free_send_msg(bp, ring,
5878                                                 RING_FREE_REQ_RING_TYPE_RX,
5879                                                 close_path ? cmpl_ring_id :
5880                                                 INVALID_HW_RING_ID);
5881                         ring->fw_ring_id = INVALID_HW_RING_ID;
5882                         bp->grp_info[grp_idx].rx_fw_ring_id =
5883                                 INVALID_HW_RING_ID;
5884                 }
5885         }
5886
5887         if (bp->flags & BNXT_FLAG_CHIP_P5)
5888                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5889         else
5890                 type = RING_FREE_REQ_RING_TYPE_RX;
5891         for (i = 0; i < bp->rx_nr_rings; i++) {
5892                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5893                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5894                 u32 grp_idx = rxr->bnapi->index;
5895
5896                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5897                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5898
5899                         hwrm_ring_free_send_msg(bp, ring, type,
5900                                                 close_path ? cmpl_ring_id :
5901                                                 INVALID_HW_RING_ID);
5902                         ring->fw_ring_id = INVALID_HW_RING_ID;
5903                         bp->grp_info[grp_idx].agg_fw_ring_id =
5904                                 INVALID_HW_RING_ID;
5905                 }
5906         }
5907
5908         /* The completion rings are about to be freed.  After that the
5909          * IRQ doorbell will not work anymore.  So we need to disable
5910          * IRQ here.
5911          */
5912         bnxt_disable_int_sync(bp);
5913
5914         if (bp->flags & BNXT_FLAG_CHIP_P5)
5915                 type = RING_FREE_REQ_RING_TYPE_NQ;
5916         else
5917                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5918         for (i = 0; i < bp->cp_nr_rings; i++) {
5919                 struct bnxt_napi *bnapi = bp->bnapi[i];
5920                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5921                 struct bnxt_ring_struct *ring;
5922                 int j;
5923
5924                 for (j = 0; j < 2; j++) {
5925                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5926
5927                         if (cpr2) {
5928                                 ring = &cpr2->cp_ring_struct;
5929                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5930                                         continue;
5931                                 hwrm_ring_free_send_msg(bp, ring,
5932                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5933                                         INVALID_HW_RING_ID);
5934                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5935                         }
5936                 }
5937                 ring = &cpr->cp_ring_struct;
5938                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5939                         hwrm_ring_free_send_msg(bp, ring, type,
5940                                                 INVALID_HW_RING_ID);
5941                         ring->fw_ring_id = INVALID_HW_RING_ID;
5942                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5943                 }
5944         }
5945 }
5946
5947 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5948                            bool shared);
5949
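/* Query the firmware for the resources currently reserved for this
 * function and update bp->hw_resc accordingly.
 */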
5950 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5951 {
5952         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5953         struct hwrm_func_qcfg_output *resp;
5954         struct hwrm_func_qcfg_input *req;
5955         int rc;
5956
5957         if (bp->hwrm_spec_code < 0x10601)
5958                 return 0;
5959
5960         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5961         if (rc)
5962                 return rc;
5963
5964         req->fid = cpu_to_le16(0xffff);
5965         resp = hwrm_req_hold(bp, req);
5966         rc = hwrm_req_send(bp, req);
5967         if (rc) {
5968                 hwrm_req_drop(bp, req);
5969                 return rc;
5970         }
5971
5972         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5973         if (BNXT_NEW_RM(bp)) {
5974                 u16 cp, stats;
5975
5976                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5977                 hw_resc->resv_hw_ring_grps =
5978                         le32_to_cpu(resp->alloc_hw_ring_grps);
5979                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5980                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5981                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5982                 hw_resc->resv_irqs = cp;
5983                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5984                         int rx = hw_resc->resv_rx_rings;
5985                         int tx = hw_resc->resv_tx_rings;
5986
5987                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5988                                 rx >>= 1;
5989                         if (cp < (rx + tx)) {
5990                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5991                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5992                                         rx <<= 1;
5993                                 hw_resc->resv_rx_rings = rx;
5994                                 hw_resc->resv_tx_rings = tx;
5995                         }
5996                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5997                         hw_resc->resv_hw_ring_grps = rx;
5998                 }
5999                 hw_resc->resv_cp_rings = cp;
6000                 hw_resc->resv_stat_ctxs = stats;
6001         }
6002         hwrm_req_drop(bp, req);
6003         return 0;
6004 }
6005
6006 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6007 {
6008         struct hwrm_func_qcfg_output *resp;
6009         struct hwrm_func_qcfg_input *req;
6010         int rc;
6011
6012         if (bp->hwrm_spec_code < 0x10601)
6013                 return 0;
6014
6015         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6016         if (rc)
6017                 return rc;
6018
6019         req->fid = cpu_to_le16(fid);
6020         resp = hwrm_req_hold(bp, req);
6021         rc = hwrm_req_send(bp, req);
6022         if (!rc)
6023                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6024
6025         hwrm_req_drop(bp, req);
6026         return rc;
6027 }
6028
6029 static bool bnxt_rfs_supported(struct bnxt *bp);
6030
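/* Build (but do not send) a FUNC_CFG request that reserves the given
 * ring and resource counts for the PF.  Returns NULL on failure.
 */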
6031 static struct hwrm_func_cfg_input *
6032 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6033                              int ring_grps, int cp_rings, int stats, int vnics)
6034 {
6035         struct hwrm_func_cfg_input *req;
6036         u32 enables = 0;
6037
6038         if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6039                 return NULL;
6040
6041         req->fid = cpu_to_le16(0xffff);
6042         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6043         req->num_tx_rings = cpu_to_le16(tx_rings);
6044         if (BNXT_NEW_RM(bp)) {
6045                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6046                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6047                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6048                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6049                         enables |= tx_rings + ring_grps ?
6050                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6051                         enables |= rx_rings ?
6052                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6053                 } else {
6054                         enables |= cp_rings ?
6055                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6056                         enables |= ring_grps ?
6057                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6058                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6059                 }
6060                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6061
6062                 req->num_rx_rings = cpu_to_le16(rx_rings);
6063                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6064                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6065                         req->num_msix = cpu_to_le16(cp_rings);
6066                         req->num_rsscos_ctxs =
6067                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6068                 } else {
6069                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6070                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6071                         req->num_rsscos_ctxs = cpu_to_le16(1);
6072                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6073                             bnxt_rfs_supported(bp))
6074                                 req->num_rsscos_ctxs =
6075                                         cpu_to_le16(ring_grps + 1);
6076                 }
6077                 req->num_stat_ctxs = cpu_to_le16(stats);
6078                 req->num_vnics = cpu_to_le16(vnics);
6079         }
6080         req->enables = cpu_to_le32(enables);
6081         return req;
6082 }
6083
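/* Build (but do not send) a FUNC_VF_CFG request that reserves the given
 * ring and resource counts for a VF.  Returns NULL on failure.
 */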
6084 static struct hwrm_func_vf_cfg_input *
6085 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6086                              int ring_grps, int cp_rings, int stats, int vnics)
6087 {
6088         struct hwrm_func_vf_cfg_input *req;
6089         u32 enables = 0;
6090
6091         if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6092                 return NULL;
6093
6094         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6095         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6096                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6097         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6098         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6099                 enables |= tx_rings + ring_grps ?
6100                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6101         } else {
6102                 enables |= cp_rings ?
6103                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6104                 enables |= ring_grps ?
6105                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6106         }
6107         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6108         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6109
6110         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6111         req->num_tx_rings = cpu_to_le16(tx_rings);
6112         req->num_rx_rings = cpu_to_le16(rx_rings);
6113         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6114                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6115                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6116         } else {
6117                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6118                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6119                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6120         }
6121         req->num_stat_ctxs = cpu_to_le16(stats);
6122         req->num_vnics = cpu_to_le16(vnics);
6123
6124         req->enables = cpu_to_le32(enables);
6125         return req;
6126 }
6127
6128 static int
6129 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6130                            int ring_grps, int cp_rings, int stats, int vnics)
6131 {
6132         struct hwrm_func_cfg_input *req;
6133         int rc;
6134
6135         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6136                                            cp_rings, stats, vnics);
6137         if (!req)
6138                 return -ENOMEM;
6139
6140         if (!req->enables) {
6141                 hwrm_req_drop(bp, req);
6142                 return 0;
6143         }
6144
6145         rc = hwrm_req_send(bp, req);
6146         if (rc)
6147                 return rc;
6148
6149         if (bp->hwrm_spec_code < 0x10601)
6150                 bp->hw_resc.resv_tx_rings = tx_rings;
6151
6152         return bnxt_hwrm_get_rings(bp);
6153 }
6154
6155 static int
6156 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6157                            int ring_grps, int cp_rings, int stats, int vnics)
6158 {
6159         struct hwrm_func_vf_cfg_input *req;
6160         int rc;
6161
6162         if (!BNXT_NEW_RM(bp)) {
6163                 bp->hw_resc.resv_tx_rings = tx_rings;
6164                 return 0;
6165         }
6166
6167         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6168                                            cp_rings, stats, vnics);
6169         if (!req)
6170                 return -ENOMEM;
6171
6172         rc = hwrm_req_send(bp, req);
6173         if (rc)
6174                 return rc;
6175
6176         return bnxt_hwrm_get_rings(bp);
6177 }
6178
6179 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6180                                    int cp, int stat, int vnic)
6181 {
6182         if (BNXT_PF(bp))
6183                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6184                                                   vnic);
6185         else
6186                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6187                                                   vnic);
6188 }
6189
6190 int bnxt_nq_rings_in_use(struct bnxt *bp)
6191 {
6192         int cp = bp->cp_nr_rings;
6193         int ulp_msix, ulp_base;
6194
6195         ulp_msix = bnxt_get_ulp_msix_num(bp);
6196         if (ulp_msix) {
6197                 ulp_base = bnxt_get_ulp_msix_base(bp);
6198                 cp += ulp_msix;
6199                 if ((ulp_base + ulp_msix) > cp)
6200                         cp = ulp_base + ulp_msix;
6201         }
6202         return cp;
6203 }
6204
6205 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6206 {
6207         int cp;
6208
6209         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6210                 return bnxt_nq_rings_in_use(bp);
6211
6212         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6213         return cp;
6214 }
6215
6216 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6217 {
6218         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6219         int cp = bp->cp_nr_rings;
6220
6221         if (!ulp_stat)
6222                 return cp;
6223
6224         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6225                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6226
6227         return cp + ulp_stat;
6228 }
6229
6230 /* Check if a default RSS map needs to be set up.  This function is only
6231  * used on older firmware that does not require reserving RX rings.
6232  */
6233 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6234 {
6235         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6236
6237         /* The RSS map is valid for RX rings set to resv_rx_rings */
6238         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6239                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6240                 if (!netif_is_rxfh_configured(bp->dev))
6241                         bnxt_set_dflt_rss_indir_tbl(bp);
6242         }
6243 }
6244
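/* Return true if the currently reserved resources no longer match what
 * the driver needs and a new reservation must be made.
 */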
6245 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6246 {
6247         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6248         int cp = bnxt_cp_rings_in_use(bp);
6249         int nq = bnxt_nq_rings_in_use(bp);
6250         int rx = bp->rx_nr_rings, stat;
6251         int vnic = 1, grp = rx;
6252
6253         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6254             bp->hwrm_spec_code >= 0x10601)
6255                 return true;
6256
6257         /* Old firmware does not need RX ring reservations but we still
6258          * need to set up a default RSS map when needed.  With new firmware
6259          * we go through RX ring reservations first and then set up the
6260          * RSS map for the successfully reserved RX rings when needed.
6261          */
6262         if (!BNXT_NEW_RM(bp)) {
6263                 bnxt_check_rss_tbl_no_rmgr(bp);
6264                 return false;
6265         }
6266         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6267                 vnic = rx + 1;
6268         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6269                 rx <<= 1;
6270         stat = bnxt_get_func_stat_ctxs(bp);
6271         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6272             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6273             (hw_resc->resv_hw_ring_grps != grp &&
6274              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6275                 return true;
6276         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6277             hw_resc->resv_irqs != nq)
6278                 return true;
6279         return false;
6280 }
6281
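/* Reserve rings and related resources with the firmware and trim the
 * driver's ring counts down to what was actually granted.
 */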
6282 static int __bnxt_reserve_rings(struct bnxt *bp)
6283 {
6284         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6285         int cp = bnxt_nq_rings_in_use(bp);
6286         int tx = bp->tx_nr_rings;
6287         int rx = bp->rx_nr_rings;
6288         int grp, rx_rings, rc;
6289         int vnic = 1, stat;
6290         bool sh = false;
6291
6292         if (!bnxt_need_reserve_rings(bp))
6293                 return 0;
6294
6295         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6296                 sh = true;
6297         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6298                 vnic = rx + 1;
6299         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6300                 rx <<= 1;
6301         grp = bp->rx_nr_rings;
6302         stat = bnxt_get_func_stat_ctxs(bp);
6303
6304         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6305         if (rc)
6306                 return rc;
6307
6308         tx = hw_resc->resv_tx_rings;
6309         if (BNXT_NEW_RM(bp)) {
6310                 rx = hw_resc->resv_rx_rings;
6311                 cp = hw_resc->resv_irqs;
6312                 grp = hw_resc->resv_hw_ring_grps;
6313                 vnic = hw_resc->resv_vnics;
6314                 stat = hw_resc->resv_stat_ctxs;
6315         }
6316
6317         rx_rings = rx;
6318         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6319                 if (rx >= 2) {
6320                         rx_rings = rx >> 1;
6321                 } else {
6322                         if (netif_running(bp->dev))
6323                                 return -ENOMEM;
6324
6325                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6326                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6327                         bp->dev->hw_features &= ~NETIF_F_LRO;
6328                         bp->dev->features &= ~NETIF_F_LRO;
6329                         bnxt_set_ring_params(bp);
6330                 }
6331         }
6332         rx_rings = min_t(int, rx_rings, grp);
6333         cp = min_t(int, cp, bp->cp_nr_rings);
6334         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6335                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6336         cp = min_t(int, cp, stat);
6337         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6338         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6339                 rx = rx_rings << 1;
6340         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6341         bp->tx_nr_rings = tx;
6342
6343         /* If we cannot reserve all the RX rings, reset the RSS map only
6344          * if absolutely necessary
6345          */
6346         if (rx_rings != bp->rx_nr_rings) {
6347                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6348                             rx_rings, bp->rx_nr_rings);
6349                 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6350                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6351                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6352                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6353                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6354                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6355                 }
6356         }
6357         bp->rx_nr_rings = rx_rings;
6358         bp->cp_nr_rings = cp;
6359
6360         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6361                 return -ENOMEM;
6362
6363         if (!netif_is_rxfh_configured(bp->dev))
6364                 bnxt_set_dflt_rss_indir_tbl(bp);
6365
6366         return rc;
6367 }
6368
6369 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6370                                     int ring_grps, int cp_rings, int stats,
6371                                     int vnics)
6372 {
6373         struct hwrm_func_vf_cfg_input *req;
6374         u32 flags;
6375
6376         if (!BNXT_NEW_RM(bp))
6377                 return 0;
6378
6379         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6380                                            cp_rings, stats, vnics);
        if (!req)
                return -ENOMEM;
6381         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6382                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6383                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6384                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6385                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6386                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6387         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6388                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6389
6390         req->flags = cpu_to_le32(flags);
6391         return hwrm_req_send_silent(bp, req);
6392 }
6393
6394 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6395                                     int ring_grps, int cp_rings, int stats,
6396                                     int vnics)
6397 {
6398         struct hwrm_func_cfg_input *req;
6399         u32 flags;
6400
6401         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6402                                            cp_rings, stats, vnics);
        if (!req)
                return -ENOMEM;
6403         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6404         if (BNXT_NEW_RM(bp)) {
6405                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6406                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6407                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6408                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6409                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6410                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6411                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6412                 else
6413                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6414         }
6415
6416         req->flags = cpu_to_le32(flags);
6417         return hwrm_req_send_silent(bp, req);
6418 }
6419
6420 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6421                                  int ring_grps, int cp_rings, int stats,
6422                                  int vnics)
6423 {
6424         if (bp->hwrm_spec_code < 0x10801)
6425                 return 0;
6426
6427         if (BNXT_PF(bp))
6428                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6429                                                 ring_grps, cp_rings, stats,
6430                                                 vnics);
6431
6432         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6433                                         cp_rings, stats, vnics);
6434 }
6435
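/* Query interrupt coalescing capabilities.  Legacy defaults are used
 * if the firmware is too old to support HWRM_RING_AGGINT_QCAPS.
 */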
6436 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6437 {
6438         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6439         struct hwrm_ring_aggint_qcaps_output *resp;
6440         struct hwrm_ring_aggint_qcaps_input *req;
6441         int rc;
6442
6443         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6444         coal_cap->num_cmpl_dma_aggr_max = 63;
6445         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6446         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6447         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6448         coal_cap->int_lat_tmr_min_max = 65535;
6449         coal_cap->int_lat_tmr_max_max = 65535;
6450         coal_cap->num_cmpl_aggr_int_max = 65535;
6451         coal_cap->timer_units = 80;
6452
6453         if (bp->hwrm_spec_code < 0x10902)
6454                 return;
6455
6456         if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6457                 return;
6458
6459         resp = hwrm_req_hold(bp, req);
6460         rc = hwrm_req_send_silent(bp, req);
6461         if (!rc) {
6462                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6463                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6464                 coal_cap->num_cmpl_dma_aggr_max =
6465                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6466                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6467                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6468                 coal_cap->cmpl_aggr_dma_tmr_max =
6469                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6470                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6471                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6472                 coal_cap->int_lat_tmr_min_max =
6473                         le16_to_cpu(resp->int_lat_tmr_min_max);
6474                 coal_cap->int_lat_tmr_max_max =
6475                         le16_to_cpu(resp->int_lat_tmr_max_max);
6476                 coal_cap->num_cmpl_aggr_int_max =
6477                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6478                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6479         }
6480         hwrm_req_drop(bp, req);
6481 }
6482
6483 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6484 {
6485         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6486
6487         return usec * 1000 / coal_cap->timer_units;
6488 }
6489
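/* Fill a RING_CMPL_RING_CFG_AGGINT_PARAMS request from the driver's
 * coalescing settings, clamped to the reported capabilities.
 */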
6490 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6491         struct bnxt_coal *hw_coal,
6492         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6493 {
6494         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6495         u32 cmpl_params = coal_cap->cmpl_params;
6496         u16 val, tmr, max, flags = 0;
6497
6498         max = hw_coal->bufs_per_record * 128;
6499         if (hw_coal->budget)
6500                 max = hw_coal->bufs_per_record * hw_coal->budget;
6501         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6502
6503         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6504         req->num_cmpl_aggr_int = cpu_to_le16(val);
6505
6506         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6507         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6508
6509         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6510                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6511         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6512
6513         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6514         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6515         req->int_lat_tmr_max = cpu_to_le16(tmr);
6516
6517         /* min timer set to 1/2 of interrupt timer */
6518         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6519                 val = tmr / 2;
6520                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6521                 req->int_lat_tmr_min = cpu_to_le16(val);
6522                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6523         }
6524
6525         /* buf timer set to 1/4 of interrupt timer */
6526         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6527         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6528
6529         if (cmpl_params &
6530             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6531                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6532                 val = clamp_t(u16, tmr, 1,
6533                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6534                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6535                 req->enables |=
6536                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6537         }
6538
6539         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6540                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6541         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6542             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6543                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6544         req->flags = cpu_to_le16(flags);
6545         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6546 }
6547
6548 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6549                                    struct bnxt_coal *hw_coal)
6550 {
6551         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6552         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6553         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6554         u32 nq_params = coal_cap->nq_params;
6555         u16 tmr;
6556         int rc;
6557
6558         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6559                 return 0;
6560
6561         rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6562         if (rc)
6563                 return rc;
6564
6565         req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6566         req->flags =
6567                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6568
6569         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6570         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6571         req->int_lat_tmr_min = cpu_to_le16(tmr);
6572         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6573         return hwrm_req_send(bp, req);
6574 }
6575
6576 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6577 {
6578         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6579         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6580         struct bnxt_coal coal;
6581         int rc;
6582
6583         /* Tick values in microseconds.
6584          * 1 coal_buf x bufs_per_record = 1 completion record.
6585          */
6586         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6587
6588         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6589         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6590
6591         if (!bnapi->rx_ring)
6592                 return -ENODEV;
6593
6594         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6595         if (rc)
6596                 return rc;
6597
6598         bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6599
6600         req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6601
6602         return hwrm_req_send(bp, req_rx);
6603 }
6604
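/* Program RX and TX coalescing parameters on every completion ring,
 * plus the NQ coalescing timer on P5 chips.
 */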
6605 int bnxt_hwrm_set_coal(struct bnxt *bp)
6606 {
6607         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6608                                                            *req;
6609         int i, rc;
6610
6611         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6612         if (rc)
6613                 return rc;
6614
6615         rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6616         if (rc) {
6617                 hwrm_req_drop(bp, req_rx);
6618                 return rc;
6619         }
6620
6621         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6622         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6623
6624         hwrm_req_hold(bp, req_rx);
6625         hwrm_req_hold(bp, req_tx);
6626         for (i = 0; i < bp->cp_nr_rings; i++) {
6627                 struct bnxt_napi *bnapi = bp->bnapi[i];
6628                 struct bnxt_coal *hw_coal;
6629                 u16 ring_id;
6630
6631                 req = req_rx;
6632                 if (!bnapi->rx_ring) {
6633                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6634                         req = req_tx;
6635                 } else {
6636                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6637                 }
6638                 req->ring_id = cpu_to_le16(ring_id);
6639
6640                 rc = hwrm_req_send(bp, req);
6641                 if (rc)
6642                         break;
6643
6644                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6645                         continue;
6646
6647                 if (bnapi->rx_ring && bnapi->tx_ring) {
6648                         req = req_tx;
6649                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6650                         req->ring_id = cpu_to_le16(ring_id);
6651                         rc = hwrm_req_send(bp, req);
6652                         if (rc)
6653                                 break;
6654                 }
6655                 if (bnapi->rx_ring)
6656                         hw_coal = &bp->rx_coal;
6657                 else
6658                         hw_coal = &bp->tx_coal;
6659                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6660         }
6661         hwrm_req_drop(bp, req_rx);
6662         hwrm_req_drop(bp, req_tx);
6663         return rc;
6664 }
6665
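/* Free all firmware statistics contexts.  On older firmware (major
 * version <= 20), clear the statistics first with STAT_CTX_CLR_STATS.
 */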
6666 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6667 {
6668         struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6669         struct hwrm_stat_ctx_free_input *req;
6670         int i;
6671
6672         if (!bp->bnapi)
6673                 return;
6674
6675         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6676                 return;
6677
6678         if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6679                 return;
6680         if (BNXT_FW_MAJ(bp) <= 20) {
6681                 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6682                         hwrm_req_drop(bp, req);
6683                         return;
6684                 }
6685                 hwrm_req_hold(bp, req0);
6686         }
6687         hwrm_req_hold(bp, req);
6688         for (i = 0; i < bp->cp_nr_rings; i++) {
6689                 struct bnxt_napi *bnapi = bp->bnapi[i];
6690                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6691
6692                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6693                         req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6694                         if (req0) {
6695                                 req0->stat_ctx_id = req->stat_ctx_id;
6696                                 hwrm_req_send(bp, req0);
6697                         }
6698                         hwrm_req_send(bp, req);
6699
6700                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6701                 }
6702         }
6703         hwrm_req_drop(bp, req);
6704         if (req0)
6705                 hwrm_req_drop(bp, req0);
6706 }
6707
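/* Allocate a firmware statistics context for each completion ring and
 * point it at the ring's DMA-mapped statistics buffer.
 */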
6708 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6709 {
6710         struct hwrm_stat_ctx_alloc_output *resp;
6711         struct hwrm_stat_ctx_alloc_input *req;
6712         int rc, i;
6713
6714         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6715                 return 0;
6716
6717         rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6718         if (rc)
6719                 return rc;
6720
6721         req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6722         req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6723
6724         resp = hwrm_req_hold(bp, req);
6725         for (i = 0; i < bp->cp_nr_rings; i++) {
6726                 struct bnxt_napi *bnapi = bp->bnapi[i];
6727                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6728
6729                 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6730
6731                 rc = hwrm_req_send(bp, req);
6732                 if (rc)
6733                         break;
6734
6735                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6736
6737                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6738         }
6739         hwrm_req_drop(bp, req);
6740         return rc;
6741 }
6742
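/* Query the current function configuration: VF VLAN, capability flags,
 * bridge mode, max MTU and doorbell BAR size.
 */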
6743 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6744 {
6745         struct hwrm_func_qcfg_output *resp;
6746         struct hwrm_func_qcfg_input *req;
6747         u32 min_db_offset = 0;
6748         u16 flags;
6749         int rc;
6750
6751         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6752         if (rc)
6753                 return rc;
6754
6755         req->fid = cpu_to_le16(0xffff);
6756         resp = hwrm_req_hold(bp, req);
6757         rc = hwrm_req_send(bp, req);
6758         if (rc)
6759                 goto func_qcfg_exit;
6760
6761 #ifdef CONFIG_BNXT_SRIOV
6762         if (BNXT_VF(bp)) {
6763                 struct bnxt_vf_info *vf = &bp->vf;
6764
6765                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6766         } else {
6767                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6768         }
6769 #endif
6770         flags = le16_to_cpu(resp->flags);
6771         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6772                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6773                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6774                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6775                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6776         }
6777         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6778                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6779         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6780                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6781
6782         switch (resp->port_partition_type) {
6783         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6784         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6785         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6786                 bp->port_partition_type = resp->port_partition_type;
6787                 break;
6788         }
6789         if (bp->hwrm_spec_code < 0x10707 ||
6790             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6791                 bp->br_mode = BRIDGE_MODE_VEB;
6792         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6793                 bp->br_mode = BRIDGE_MODE_VEPA;
6794         else
6795                 bp->br_mode = BRIDGE_MODE_UNDEF;
6796
6797         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6798         if (!bp->max_mtu)
6799                 bp->max_mtu = BNXT_MAX_MTU;
6800
6801         if (bp->db_size)
6802                 goto func_qcfg_exit;
6803
6804         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6805                 if (BNXT_PF(bp))
6806                         min_db_offset = DB_PF_OFFSET_P5;
6807                 else
6808                         min_db_offset = DB_VF_OFFSET_P5;
6809         }
6810         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6811                                  1024);
6812         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6813             bp->db_size <= min_db_offset)
6814                 bp->db_size = pci_resource_len(bp->pdev, 2);
6815
6816 func_qcfg_exit:
6817         hwrm_req_drop(bp, req);
6818         return rc;
6819 }
6820
6821 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6822                         struct hwrm_func_backing_store_qcaps_output *resp)
6823 {
6824         struct bnxt_mem_init *mem_init;
6825         u16 init_mask;
6826         u8 init_val;
6827         u8 *offset;
6828         int i;
6829
6830         init_val = resp->ctx_kind_initializer;
6831         init_mask = le16_to_cpu(resp->ctx_init_mask);
6832         offset = &resp->qp_init_offset;
6833         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6834         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6835                 mem_init->init_val = init_val;
6836                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6837                 if (!init_mask)
6838                         continue;
6839                 if (i == BNXT_CTX_MEM_INIT_STAT)
6840                         offset = &resp->stat_init_offset;
6841                 if (init_mask & (1 << i))
6842                         mem_init->offset = *offset * 4;
6843                 else
6844                         mem_init->init_val = 0;
6845         }
6846         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6847         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6848         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6849         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6850         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6851         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6852 }
6853
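/* Query backing store (context memory) requirements from the firmware
 * and allocate the bnxt_ctx_mem_info structure describing them.
 */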
6854 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6855 {
6856         struct hwrm_func_backing_store_qcaps_output *resp;
6857         struct hwrm_func_backing_store_qcaps_input *req;
6858         int rc;
6859
6860         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6861                 return 0;
6862
6863         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6864         if (rc)
6865                 return rc;
6866
6867         resp = hwrm_req_hold(bp, req);
6868         rc = hwrm_req_send_silent(bp, req);
6869         if (!rc) {
6870                 struct bnxt_ctx_pg_info *ctx_pg;
6871                 struct bnxt_ctx_mem_info *ctx;
6872                 int i, tqm_rings;
6873
6874                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6875                 if (!ctx) {
6876                         rc = -ENOMEM;
6877                         goto ctx_err;
6878                 }
6879                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6880                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6881                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6882                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6883                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6884                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6885                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6886                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6887                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6888                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6889                 ctx->vnic_max_vnic_entries =
6890                         le16_to_cpu(resp->vnic_max_vnic_entries);
6891                 ctx->vnic_max_ring_table_entries =
6892                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6893                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6894                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6895                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6896                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6897                 ctx->tqm_min_entries_per_ring =
6898                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6899                 ctx->tqm_max_entries_per_ring =
6900                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6901                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6902                 if (!ctx->tqm_entries_multiple)
6903                         ctx->tqm_entries_multiple = 1;
6904                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6905                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6906                 ctx->mrav_num_entries_units =
6907                         le16_to_cpu(resp->mrav_num_entries_units);
6908                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6909                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6910
6911                 bnxt_init_ctx_initializer(ctx, resp);
6912
6913                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6914                 if (!ctx->tqm_fp_rings_count)
6915                         ctx->tqm_fp_rings_count = bp->max_q;
6916                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6917                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6918
6919                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6920                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6921                 if (!ctx_pg) {
6922                         kfree(ctx);
6923                         rc = -ENOMEM;
6924                         goto ctx_err;
6925                 }
6926                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6927                         ctx->tqm_mem[i] = ctx_pg;
6928                 bp->ctx = ctx;
6929         } else {
6930                 rc = 0;
6931         }
6932 ctx_err:
6933         hwrm_req_drop(bp, req);
6934         return rc;
6935 }
6936
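/* Encode the page size/level attribute and page directory address for one
 * context memory region: indirect regions (depth 1 or 2) point *pg_dir at
 * the page table, while a flat region points it at its single data page.
 */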
6937 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6938                                   __le64 *pg_dir)
6939 {
6940         if (!rmem->nr_pages)
6941                 return;
6942
6943         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6944         if (rmem->depth >= 1) {
6945                 if (rmem->depth == 2)
6946                         *pg_attr |= 2;
6947                 else
6948                         *pg_attr |= 1;
6949                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6950         } else {
6951                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6952         }
6953 }
6954
6955 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6956         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6957          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6958          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6959          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6960          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6961
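/* Pass the driver's backing store layout (entry counts, entry sizes and page
 * directories) to the firmware for every context type selected in @enables.
 * The request is trimmed to the legacy length when the firmware cannot
 * accept the full extended request.
 */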
6962 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6963 {
6964         struct hwrm_func_backing_store_cfg_input *req;
6965         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6966         struct bnxt_ctx_pg_info *ctx_pg;
6967         void **__req = (void **)&req;
6968         u32 req_len = sizeof(*req);
6969         __le32 *num_entries;
6970         __le64 *pg_dir;
6971         u32 flags = 0;
6972         u8 *pg_attr;
6973         u32 ena;
6974         int rc;
6975         int i;
6976
6977         if (!ctx)
6978                 return 0;
6979
6980         if (req_len > bp->hwrm_max_ext_req_len)
6981                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
6982         rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
6983         if (rc)
6984                 return rc;
6985
6986         req->enables = cpu_to_le32(enables);
6987         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6988                 ctx_pg = &ctx->qp_mem;
6989                 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
6990                 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6991                 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6992                 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6993                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6994                                       &req->qpc_pg_size_qpc_lvl,
6995                                       &req->qpc_page_dir);
6996         }
6997         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6998                 ctx_pg = &ctx->srq_mem;
6999                 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7000                 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7001                 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7002                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7003                                       &req->srq_pg_size_srq_lvl,
7004                                       &req->srq_page_dir);
7005         }
7006         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7007                 ctx_pg = &ctx->cq_mem;
7008                 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7009                 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7010                 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7011                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7012                                       &req->cq_pg_size_cq_lvl,
7013                                       &req->cq_page_dir);
7014         }
7015         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7016                 ctx_pg = &ctx->vnic_mem;
7017                 req->vnic_num_vnic_entries =
7018                         cpu_to_le16(ctx->vnic_max_vnic_entries);
7019                 req->vnic_num_ring_table_entries =
7020                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
7021                 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7022                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7023                                       &req->vnic_pg_size_vnic_lvl,
7024                                       &req->vnic_page_dir);
7025         }
7026         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7027                 ctx_pg = &ctx->stat_mem;
7028                 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7029                 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7030                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7031                                       &req->stat_pg_size_stat_lvl,
7032                                       &req->stat_page_dir);
7033         }
7034         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7035                 ctx_pg = &ctx->mrav_mem;
7036                 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7037                 if (ctx->mrav_num_entries_units)
7038                         flags |=
7039                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7040                 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7041                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7042                                       &req->mrav_pg_size_mrav_lvl,
7043                                       &req->mrav_page_dir);
7044         }
7045         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7046                 ctx_pg = &ctx->tim_mem;
7047                 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7048                 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7049                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7050                                       &req->tim_pg_size_tim_lvl,
7051                                       &req->tim_page_dir);
7052         }
7053         for (i = 0, num_entries = &req->tqm_sp_num_entries,
7054              pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7055              pg_dir = &req->tqm_sp_page_dir,
7056              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7057              i < BNXT_MAX_TQM_RINGS;
7058              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7059                 if (!(enables & ena))
7060                         continue;
7061
7062                 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7063                 ctx_pg = ctx->tqm_mem[i];
7064                 *num_entries = cpu_to_le32(ctx_pg->entries);
7065                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7066         }
7067         req->flags = cpu_to_le32(flags);
7068         return hwrm_req_send(bp, req);
7069 }
7070
7071 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7072                                   struct bnxt_ctx_pg_info *ctx_pg)
7073 {
7074         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7075
7076         rmem->page_size = BNXT_PAGE_SIZE;
7077         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7078         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7079         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7080         if (rmem->depth >= 1)
7081                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7082         return bnxt_alloc_ring(bp, rmem);
7083 }
7084
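/* Allocate the DMA pages backing one context memory region.  Small regions
 * use a flat page array; regions needing more than MAX_CTX_PAGES pages (or
 * an explicitly requested depth > 1) use a two-level layout with up to
 * MAX_CTX_PAGES data pages per second-level table.
 */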
7085 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7086                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7087                                   u8 depth, struct bnxt_mem_init *mem_init)
7088 {
7089         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7090         int rc;
7091
7092         if (!mem_size)
7093                 return -EINVAL;
7094
7095         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7096         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7097                 ctx_pg->nr_pages = 0;
7098                 return -EINVAL;
7099         }
7100         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7101                 int nr_tbls, i;
7102
7103                 rmem->depth = 2;
7104                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7105                                              GFP_KERNEL);
7106                 if (!ctx_pg->ctx_pg_tbl)
7107                         return -ENOMEM;
7108                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7109                 rmem->nr_pages = nr_tbls;
7110                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7111                 if (rc)
7112                         return rc;
7113                 for (i = 0; i < nr_tbls; i++) {
7114                         struct bnxt_ctx_pg_info *pg_tbl;
7115
7116                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7117                         if (!pg_tbl)
7118                                 return -ENOMEM;
7119                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7120                         rmem = &pg_tbl->ring_mem;
7121                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7122                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7123                         rmem->depth = 1;
7124                         rmem->nr_pages = MAX_CTX_PAGES;
7125                         rmem->mem_init = mem_init;
7126                         if (i == (nr_tbls - 1)) {
7127                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7128
7129                                 if (rem)
7130                                         rmem->nr_pages = rem;
7131                         }
7132                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7133                         if (rc)
7134                                 break;
7135                 }
7136         } else {
7137                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7138                 if (rmem->nr_pages > 1 || depth)
7139                         rmem->depth = 1;
7140                 rmem->mem_init = mem_init;
7141                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7142         }
7143         return rc;
7144 }
7145
7146 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7147                                   struct bnxt_ctx_pg_info *ctx_pg)
7148 {
7149         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7150
7151         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7152             ctx_pg->ctx_pg_tbl) {
7153                 int i, nr_tbls = rmem->nr_pages;
7154
7155                 for (i = 0; i < nr_tbls; i++) {
7156                         struct bnxt_ctx_pg_info *pg_tbl;
7157                         struct bnxt_ring_mem_info *rmem2;
7158
7159                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7160                         if (!pg_tbl)
7161                                 continue;
7162                         rmem2 = &pg_tbl->ring_mem;
7163                         bnxt_free_ring(bp, rmem2);
7164                         ctx_pg->ctx_pg_arr[i] = NULL;
7165                         kfree(pg_tbl);
7166                         ctx_pg->ctx_pg_tbl[i] = NULL;
7167                 }
7168                 kfree(ctx_pg->ctx_pg_tbl);
7169                 ctx_pg->ctx_pg_tbl = NULL;
7170         }
7171         bnxt_free_ring(bp, rmem);
7172         ctx_pg->nr_pages = 0;
7173 }
7174
7175 static void bnxt_free_ctx_mem(struct bnxt *bp)
7176 {
7177         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7178         int i;
7179
7180         if (!ctx)
7181                 return;
7182
7183         if (ctx->tqm_mem[0]) {
7184                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7185                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7186                 kfree(ctx->tqm_mem[0]);
7187                 ctx->tqm_mem[0] = NULL;
7188         }
7189
7190         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7191         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7192         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7193         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7194         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7195         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7196         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7197         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7198 }
7199
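/* Size and allocate all firmware backing store regions (QP, SRQ, CQ, VNIC,
 * stats, TQM and, when RoCE is supported, MRAV and TIM), then hand the
 * layout to the firmware via FUNC_BACKING_STORE_CFG.  Extra QP/SRQ entries
 * are reserved for RDMA except on kdump kernels.  This is a no-op if the
 * context memory has already been initialized.
 */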
7200 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7201 {
7202         struct bnxt_ctx_pg_info *ctx_pg;
7203         struct bnxt_ctx_mem_info *ctx;
7204         struct bnxt_mem_init *init;
7205         u32 mem_size, ena, entries;
7206         u32 entries_sp, min;
7207         u32 num_mr, num_ah;
7208         u32 extra_srqs = 0;
7209         u32 extra_qps = 0;
7210         u8 pg_lvl = 1;
7211         int i, rc;
7212
7213         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7214         if (rc) {
7215                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7216                            rc);
7217                 return rc;
7218         }
7219         ctx = bp->ctx;
7220         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7221                 return 0;
7222
7223         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7224                 pg_lvl = 2;
7225                 extra_qps = 65536;
7226                 extra_srqs = 8192;
7227         }
7228
7229         ctx_pg = &ctx->qp_mem;
7230         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7231                           extra_qps;
7232         if (ctx->qp_entry_size) {
7233                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7234                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7235                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7236                 if (rc)
7237                         return rc;
7238         }
7239
7240         ctx_pg = &ctx->srq_mem;
7241         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7242         if (ctx->srq_entry_size) {
7243                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7244                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7245                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7246                 if (rc)
7247                         return rc;
7248         }
7249
7250         ctx_pg = &ctx->cq_mem;
7251         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7252         if (ctx->cq_entry_size) {
7253                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7254                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7255                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7256                 if (rc)
7257                         return rc;
7258         }
7259
7260         ctx_pg = &ctx->vnic_mem;
7261         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7262                           ctx->vnic_max_ring_table_entries;
7263         if (ctx->vnic_entry_size) {
7264                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7265                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7266                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7267                 if (rc)
7268                         return rc;
7269         }
7270
7271         ctx_pg = &ctx->stat_mem;
7272         ctx_pg->entries = ctx->stat_max_entries;
7273         if (ctx->stat_entry_size) {
7274                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7275                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7276                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7277                 if (rc)
7278                         return rc;
7279         }
7280
7281         ena = 0;
7282         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7283                 goto skip_rdma;
7284
7285         ctx_pg = &ctx->mrav_mem;
7286         /* 128K extra is needed to accommodate static AH context
7287          * allocation by f/w.
7288          */
7289         num_mr = 1024 * 256;
7290         num_ah = 1024 * 128;
7291         ctx_pg->entries = num_mr + num_ah;
7292         if (ctx->mrav_entry_size) {
7293                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7294                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7295                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7296                 if (rc)
7297                         return rc;
7298         }
7299         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7300         if (ctx->mrav_num_entries_units)
7301                 ctx_pg->entries =
7302                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7303                          (num_ah / ctx->mrav_num_entries_units);
7304
7305         ctx_pg = &ctx->tim_mem;
7306         ctx_pg->entries = ctx->qp_mem.entries;
7307         if (ctx->tim_entry_size) {
7308                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7309                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7310                 if (rc)
7311                         return rc;
7312         }
7313         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7314
7315 skip_rdma:
7316         min = ctx->tqm_min_entries_per_ring;
7317         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7318                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7319         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7320         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7321         entries = roundup(entries, ctx->tqm_entries_multiple);
7322         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7323         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7324                 ctx_pg = ctx->tqm_mem[i];
7325                 ctx_pg->entries = i ? entries : entries_sp;
7326                 if (ctx->tqm_entry_size) {
7327                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7328                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7329                                                     NULL);
7330                         if (rc)
7331                                 return rc;
7332                 }
7333                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7334         }
7335         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7336         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7337         if (rc) {
7338                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7339                            rc);
7340                 return rc;
7341         }
7342         ctx->flags |= BNXT_CTX_FLAG_INITED;
7343         return 0;
7344 }
7345
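/* Read the min/max resource ranges (rings, ring groups, L2/RSS contexts,
 * VNICs, stat contexts) that the firmware allows this function to reserve.
 * With @all set the full set of limits is cached in bp->hw_resc; otherwise
 * only max_tx_sch_inputs is refreshed.
 */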
7346 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7347 {
7348         struct hwrm_func_resource_qcaps_output *resp;
7349         struct hwrm_func_resource_qcaps_input *req;
7350         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7351         int rc;
7352
7353         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7354         if (rc)
7355                 return rc;
7356
7357         req->fid = cpu_to_le16(0xffff);
7358         resp = hwrm_req_hold(bp, req);
7359         rc = hwrm_req_send_silent(bp, req);
7360         if (rc)
7361                 goto hwrm_func_resc_qcaps_exit;
7362
7363         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7364         if (!all)
7365                 goto hwrm_func_resc_qcaps_exit;
7366
7367         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7368         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7369         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7370         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7371         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7372         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7373         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7374         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7375         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7376         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7377         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7378         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7379         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7380         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7381         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7382         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7383
7384         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7385                 u16 max_msix = le16_to_cpu(resp->max_msix);
7386
7387                 hw_resc->max_nqs = max_msix;
7388                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7389         }
7390
7391         if (BNXT_PF(bp)) {
7392                 struct bnxt_pf_info *pf = &bp->pf;
7393
7394                 pf->vf_resv_strategy =
7395                         le16_to_cpu(resp->vf_reservation_strategy);
7396                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7397                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7398         }
7399 hwrm_func_resc_qcaps_exit:
7400         hwrm_req_drop(bp, req);
7401         return rc;
7402 }
7403
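/* Query PTP support for this port.  The firmware must allow HWRM access to
 * the PHC; the reference clock register addresses come either from the
 * response (partial direct access) or, on P5 chips, from fixed locations.
 * bp->ptp_cfg is allocated here and freed again if anything fails.
 */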
7404 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7405 {
7406         struct hwrm_port_mac_ptp_qcfg_output *resp;
7407         struct hwrm_port_mac_ptp_qcfg_input *req;
7408         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7409         u8 flags;
7410         int rc;
7411
7412         if (bp->hwrm_spec_code < 0x10801) {
7413                 rc = -ENODEV;
7414                 goto no_ptp;
7415         }
7416
7417         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7418         if (rc)
7419                 goto no_ptp;
7420
7421         req->port_id = cpu_to_le16(bp->pf.port_id);
7422         resp = hwrm_req_hold(bp, req);
7423         rc = hwrm_req_send(bp, req);
7424         if (rc)
7425                 goto exit;
7426
7427         flags = resp->flags;
7428         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7429                 rc = -ENODEV;
7430                 goto exit;
7431         }
7432         if (!ptp) {
7433                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7434                 if (!ptp) {
7435                         rc = -ENOMEM;
7436                         goto exit;
7437                 }
7438                 ptp->bp = bp;
7439                 bp->ptp_cfg = ptp;
7440         }
7441         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7442                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7443                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7444         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7445                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7446                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7447         } else {
7448                 rc = -ENODEV;
7449                 goto exit;
7450         }
7451         rc = bnxt_ptp_init(bp);
7452         if (rc)
7453                 netdev_warn(bp->dev, "PTP initialization failed.\n");
7454 exit:
7455         hwrm_req_drop(bp, req);
7456         if (!rc)
7457                 return 0;
7458
7459 no_ptp:
7460         bnxt_ptp_clear(bp);
7461         kfree(ptp);
7462         bp->ptp_cfg = NULL;
7463         return rc;
7464 }
7465
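/* Query this function's capabilities and resource maximums.  The flags are
 * translated into bp->flags/bp->fw_cap feature bits, the maximums are cached
 * in bp->hw_resc, and PF-specific information (MAC address, VF range, flow
 * limits, WoL and PTP support) is saved in bp->pf; a VF only records its
 * fid and MAC address.
 */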
7466 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7467 {
7468         struct hwrm_func_qcaps_output *resp;
7469         struct hwrm_func_qcaps_input *req;
7470         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7471         u32 flags, flags_ext;
7472         int rc;
7473
7474         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7475         if (rc)
7476                 return rc;
7477
7478         req->fid = cpu_to_le16(0xffff);
7479         resp = hwrm_req_hold(bp, req);
7480         rc = hwrm_req_send(bp, req);
7481         if (rc)
7482                 goto hwrm_func_qcaps_exit;
7483
7484         flags = le32_to_cpu(resp->flags);
7485         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7486                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7487         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7488                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7489         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7490                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7491         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7492                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7493         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7494                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7495         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7496                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7497         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7498                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7499         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7500                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7501
7502         flags_ext = le32_to_cpu(resp->flags_ext);
7503         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7504                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7505         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7506                 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7507
7508         bp->tx_push_thresh = 0;
7509         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7510             BNXT_FW_MAJ(bp) > 217)
7511                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7512
7513         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7514         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7515         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7516         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7517         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7518         if (!hw_resc->max_hw_ring_grps)
7519                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7520         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7521         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7522         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7523
7524         if (BNXT_PF(bp)) {
7525                 struct bnxt_pf_info *pf = &bp->pf;
7526
7527                 pf->fw_fid = le16_to_cpu(resp->fid);
7528                 pf->port_id = le16_to_cpu(resp->port_id);
7529                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7530                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7531                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7532                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7533                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7534                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7535                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7536                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7537                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7538                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7539                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7540                         bp->flags |= BNXT_FLAG_WOL_CAP;
7541                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7542                         __bnxt_hwrm_ptp_qcfg(bp);
7543                 } else {
7544                         bnxt_ptp_clear(bp);
7545                         kfree(bp->ptp_cfg);
7546                         bp->ptp_cfg = NULL;
7547                 }
7548         } else {
7549 #ifdef CONFIG_BNXT_SRIOV
7550                 struct bnxt_vf_info *vf = &bp->vf;
7551
7552                 vf->fw_fid = le16_to_cpu(resp->fid);
7553                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7554 #endif
7555         }
7556
7557 hwrm_func_qcaps_exit:
7558         hwrm_req_drop(bp, req);
7559         return rc;
7560 }
7561
7562 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7563
7564 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7565 {
7566         int rc;
7567
7568         rc = __bnxt_hwrm_func_qcaps(bp);
7569         if (rc)
7570                 return rc;
7571         rc = bnxt_hwrm_queue_qportcfg(bp);
7572         if (rc) {
7573                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7574                 return rc;
7575         }
7576         if (bp->hwrm_spec_code >= 0x10803) {
7577                 rc = bnxt_alloc_ctx_mem(bp);
7578                 if (rc)
7579                         return rc;
7580                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7581                 if (!rc)
7582                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7583         }
7584         return 0;
7585 }
7586
7587 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7588 {
7589         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7590         struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7591         u32 flags;
7592         int rc;
7593
7594         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7595                 return 0;
7596
7597         rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7598         if (rc)
7599                 return rc;
7600
7601         resp = hwrm_req_hold(bp, req);
7602         rc = hwrm_req_send(bp, req);
7603         if (rc)
7604                 goto hwrm_cfa_adv_qcaps_exit;
7605
7606         flags = le32_to_cpu(resp->flags);
7607         if (flags &
7608             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7609                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7610
7611 hwrm_cfa_adv_qcaps_exit:
7612         hwrm_req_drop(bp, req);
7613         return rc;
7614 }
7615
7616 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7617 {
7618         if (bp->fw_health)
7619                 return 0;
7620
7621         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7622         if (!bp->fw_health)
7623                 return -ENOMEM;
7624
7625         return 0;
7626 }
7627
7628 static int bnxt_alloc_fw_health(struct bnxt *bp)
7629 {
7630         int rc;
7631
7632         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7633             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7634                 return 0;
7635
7636         rc = __bnxt_alloc_fw_health(bp);
7637         if (rc) {
7638                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7639                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7640                 return rc;
7641         }
7642
7643         return 0;
7644 }
7645
7646 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7647 {
7648         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7649                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7650                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7651 }
7652
7653 bool bnxt_is_fw_healthy(struct bnxt *bp)
7654 {
7655         if (bp->fw_health && bp->fw_health->status_reliable) {
7656                 u32 fw_status;
7657
7658                 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7659                 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7660                         return false;
7661         }
7662
7663         return true;
7664 }
7665
7666 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7667 {
7668         struct bnxt_fw_health *fw_health = bp->fw_health;
7669         u32 reg_type;
7670
7671         if (!fw_health || !fw_health->status_reliable)
7672                 return;
7673
7674         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7675         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7676                 fw_health->status_reliable = false;
7677 }
7678
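/* Locate the firmware health status register before the HWRM channel is
 * known to be usable.  The location advertised by the hcomm_status structure
 * is preferred; if its signature is missing, fall back to the fixed P5
 * status register accessed through BAR0.  GRC-based locations are mapped
 * through a BAR0 window.
 */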
7679 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7680 {
7681         void __iomem *hs;
7682         u32 status_loc;
7683         u32 reg_type;
7684         u32 sig;
7685
7686         if (bp->fw_health)
7687                 bp->fw_health->status_reliable = false;
7688
7689         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7690         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7691
7692         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7693         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7694                 if (!bp->chip_num) {
7695                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7696                         bp->chip_num = readl(bp->bar0 +
7697                                              BNXT_FW_HEALTH_WIN_BASE +
7698                                              BNXT_GRC_REG_CHIP_NUM);
7699                 }
7700                 if (!BNXT_CHIP_P5(bp))
7701                         return;
7702
7703                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7704                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7705         } else {
7706                 status_loc = readl(hs + offsetof(struct hcomm_status,
7707                                                  fw_status_loc));
7708         }
7709
7710         if (__bnxt_alloc_fw_health(bp)) {
7711                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7712                 return;
7713         }
7714
7715         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7716         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7717         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7718                 __bnxt_map_fw_health_reg(bp, status_loc);
7719                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7720                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7721         }
7722
7723         bp->fw_health->status_reliable = true;
7724 }
7725
7726 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7727 {
7728         struct bnxt_fw_health *fw_health = bp->fw_health;
7729         u32 reg_base = 0xffffffff;
7730         int i;
7731
7732         bp->fw_health->status_reliable = false;
7733         /* Only pre-map the monitoring GRC registers using window 3 */
7734         for (i = 0; i < 4; i++) {
7735                 u32 reg = fw_health->regs[i];
7736
7737                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7738                         continue;
7739                 if (reg_base == 0xffffffff)
7740                         reg_base = reg & BNXT_GRC_BASE_MASK;
7741                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7742                         return -ERANGE;
7743                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7744         }
7745         bp->fw_health->status_reliable = true;
7746         if (reg_base == 0xffffffff)
7747                 return 0;
7748
7749         __bnxt_map_fw_health_reg(bp, reg_base);
7750         return 0;
7751 }
7752
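/* Fetch the firmware error recovery parameters: polling interval, wait
 * times, the health/heartbeat/reset-counter register locations and the
 * register write sequence used to reset the chip.  The GRC-based health
 * registers are then pre-mapped; on any failure the error recovery
 * capability is cleared.
 */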
7753 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7754 {
7755         struct bnxt_fw_health *fw_health = bp->fw_health;
7756         struct hwrm_error_recovery_qcfg_output *resp;
7757         struct hwrm_error_recovery_qcfg_input *req;
7758         int rc, i;
7759
7760         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7761                 return 0;
7762
7763         rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7764         if (rc)
7765                 return rc;
7766
7767         resp = hwrm_req_hold(bp, req);
7768         rc = hwrm_req_send(bp, req);
7769         if (rc)
7770                 goto err_recovery_out;
7771         fw_health->flags = le32_to_cpu(resp->flags);
7772         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7773             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7774                 rc = -EINVAL;
7775                 goto err_recovery_out;
7776         }
7777         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7778         fw_health->master_func_wait_dsecs =
7779                 le32_to_cpu(resp->master_func_wait_period);
7780         fw_health->normal_func_wait_dsecs =
7781                 le32_to_cpu(resp->normal_func_wait_period);
7782         fw_health->post_reset_wait_dsecs =
7783                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7784         fw_health->post_reset_max_wait_dsecs =
7785                 le32_to_cpu(resp->max_bailout_time_after_reset);
7786         fw_health->regs[BNXT_FW_HEALTH_REG] =
7787                 le32_to_cpu(resp->fw_health_status_reg);
7788         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7789                 le32_to_cpu(resp->fw_heartbeat_reg);
7790         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7791                 le32_to_cpu(resp->fw_reset_cnt_reg);
7792         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7793                 le32_to_cpu(resp->reset_inprogress_reg);
7794         fw_health->fw_reset_inprog_reg_mask =
7795                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7796         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7797         if (fw_health->fw_reset_seq_cnt >= 16) {
7798                 rc = -EINVAL;
7799                 goto err_recovery_out;
7800         }
7801         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7802                 fw_health->fw_reset_seq_regs[i] =
7803                         le32_to_cpu(resp->reset_reg[i]);
7804                 fw_health->fw_reset_seq_vals[i] =
7805                         le32_to_cpu(resp->reset_reg_val[i]);
7806                 fw_health->fw_reset_seq_delay_msec[i] =
7807                         resp->delay_after_reset[i];
7808         }
7809 err_recovery_out:
7810         hwrm_req_drop(bp, req);
7811         if (!rc)
7812                 rc = bnxt_map_fw_health_regs(bp);
7813         if (rc)
7814                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7815         return rc;
7816 }
7817
7818 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7819 {
7820         struct hwrm_func_reset_input *req;
7821         int rc;
7822
7823         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7824         if (rc)
7825                 return rc;
7826
7827         req->enables = 0;
7828         hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7829         return hwrm_req_send(bp, req);
7830 }
7831
7832 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7833 {
7834         struct hwrm_nvm_get_dev_info_output nvm_info;
7835
7836         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7837                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7838                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7839                          nvm_info.nvm_cfg_ver_upd);
7840 }
7841
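/* Read the port's configurable CoS queues.  Queue ids and profiles are
 * cached and the usable traffic class count (max_tc) is derived, leaving
 * out BNXT_CNPQ() queues except on a PF without RoCE support; bp->max_q
 * keeps the full queue count for later TQM ring sizing.
 */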
7842 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7843 {
7844         struct hwrm_queue_qportcfg_output *resp;
7845         struct hwrm_queue_qportcfg_input *req;
7846         u8 i, j, *qptr;
7847         bool no_rdma;
7848         int rc = 0;
7849
7850         rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7851         if (rc)
7852                 return rc;
7853
7854         resp = hwrm_req_hold(bp, req);
7855         rc = hwrm_req_send(bp, req);
7856         if (rc)
7857                 goto qportcfg_exit;
7858
7859         if (!resp->max_configurable_queues) {
7860                 rc = -EINVAL;
7861                 goto qportcfg_exit;
7862         }
7863         bp->max_tc = resp->max_configurable_queues;
7864         bp->max_lltc = resp->max_configurable_lossless_queues;
7865         if (bp->max_tc > BNXT_MAX_QUEUE)
7866                 bp->max_tc = BNXT_MAX_QUEUE;
7867
7868         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7869         qptr = &resp->queue_id0;
7870         for (i = 0, j = 0; i < bp->max_tc; i++) {
7871                 bp->q_info[j].queue_id = *qptr;
7872                 bp->q_ids[i] = *qptr++;
7873                 bp->q_info[j].queue_profile = *qptr++;
7874                 bp->tc_to_qidx[j] = j;
7875                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7876                     (no_rdma && BNXT_PF(bp)))
7877                         j++;
7878         }
7879         bp->max_q = bp->max_tc;
7880         bp->max_tc = max_t(u8, j, 1);
7881
7882         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7883                 bp->max_tc = 1;
7884
7885         if (bp->max_lltc > bp->max_tc)
7886                 bp->max_lltc = bp->max_tc;
7887
7888 qportcfg_exit:
7889         hwrm_req_drop(bp, req);
7890         return rc;
7891 }
7892
7893 static int bnxt_hwrm_poll(struct bnxt *bp)
7894 {
7895         struct hwrm_ver_get_input *req;
7896         int rc;
7897
7898         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7899         if (rc)
7900                 return rc;
7901
7902         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7903         req->hwrm_intf_min = HWRM_VERSION_MINOR;
7904         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7905
7906         hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7907         rc = hwrm_req_send(bp, req);
7908         return rc;
7909 }
7910
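/* Query the HWRM/firmware version.  Records the supported interface version
 * and firmware version strings, the request length limits and default
 * command timeout, the chip number/revision, and the capability bits (short
 * command, Kong channel, 64-bit flow handles, trusted VF, advanced flow
 * management) advertised in dev_caps_cfg.
 */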
7911 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7912 {
7913         struct hwrm_ver_get_output *resp;
7914         struct hwrm_ver_get_input *req;
7915         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7916         u32 dev_caps_cfg, hwrm_ver;
7917         int rc, len;
7918
7919         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7920         if (rc)
7921                 return rc;
7922
7923         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
7924         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7925         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7926         req->hwrm_intf_min = HWRM_VERSION_MINOR;
7927         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7928
7929         resp = hwrm_req_hold(bp, req);
7930         rc = hwrm_req_send(bp, req);
7931         if (rc)
7932                 goto hwrm_ver_get_exit;
7933
7934         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7935
7936         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7937                              resp->hwrm_intf_min_8b << 8 |
7938                              resp->hwrm_intf_upd_8b;
7939         if (resp->hwrm_intf_maj_8b < 1) {
7940                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7941                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7942                             resp->hwrm_intf_upd_8b);
7943                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7944         }
7945
7946         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7947                         HWRM_VERSION_UPDATE;
7948
7949         if (bp->hwrm_spec_code > hwrm_ver)
7950                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7951                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7952                          HWRM_VERSION_UPDATE);
7953         else
7954                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7955                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7956                          resp->hwrm_intf_upd_8b);
7957
7958         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7959         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7960                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7961                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7962                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7963                 len = FW_VER_STR_LEN;
7964         } else {
7965                 fw_maj = resp->hwrm_fw_maj_8b;
7966                 fw_min = resp->hwrm_fw_min_8b;
7967                 fw_bld = resp->hwrm_fw_bld_8b;
7968                 fw_rsv = resp->hwrm_fw_rsvd_8b;
7969                 len = BC_HWRM_STR_LEN;
7970         }
7971         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7972         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7973                  fw_rsv);
7974
7975         if (strlen(resp->active_pkg_name)) {
7976                 int fw_ver_len = strlen(bp->fw_ver_str);
7977
7978                 snprintf(bp->fw_ver_str + fw_ver_len,
7979                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7980                          resp->active_pkg_name);
7981                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7982         }
7983
7984         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7985         if (!bp->hwrm_cmd_timeout)
7986                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7987
7988         if (resp->hwrm_intf_maj_8b >= 1) {
7989                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7990                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7991         }
7992         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7993                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7994
7995         bp->chip_num = le16_to_cpu(resp->chip_num);
7996         bp->chip_rev = resp->chip_rev;
7997         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7998             !resp->chip_metal)
7999                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8000
8001         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8002         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8003             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8004                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8005
8006         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8007                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8008
8009         if (dev_caps_cfg &
8010             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8011                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8012
8013         if (dev_caps_cfg &
8014             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8015                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8016
8017         if (dev_caps_cfg &
8018             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8019                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8020
8021 hwrm_ver_get_exit:
8022         hwrm_req_drop(bp, req);
8023         return rc;
8024 }
8025
8026 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8027 {
8028         struct hwrm_fw_set_time_input *req;
8029         struct tm tm;
8030         time64_t now = ktime_get_real_seconds();
8031         int rc;
8032
8033         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8034             bp->hwrm_spec_code < 0x10400)
8035                 return -EOPNOTSUPP;
8036
8037         time64_to_tm(now, 0, &tm);
8038         rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8039         if (rc)
8040                 return rc;
8041
8042         req->year = cpu_to_le16(1900 + tm.tm_year);
8043         req->month = 1 + tm.tm_mon;
8044         req->day = tm.tm_mday;
8045         req->hour = tm.tm_hour;
8046         req->minute = tm.tm_min;
8047         req->second = tm.tm_sec;
8048         return hwrm_req_send(bp, req);
8049 }
8050
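/* Fold one hardware counter sample into its 64-bit software mirror.  The
 * hardware counter is only @mask wide, so a sample smaller than the
 * previously accumulated low bits means the counter wrapped and one full
 * period (mask + 1) is added.
 */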
8051 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8052 {
8053         u64 sw_tmp;
8054
8055         hw &= mask;
8056         sw_tmp = (*sw & ~mask) | hw;
8057         if (hw < (*sw & mask))
8058                 sw_tmp += mask + 1;
8059         WRITE_ONCE(*sw, sw_tmp);
8060 }
8061
8062 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8063                                     int count, bool ignore_zero)
8064 {
8065         int i;
8066
8067         for (i = 0; i < count; i++) {
8068                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8069
8070                 if (ignore_zero && !hw)
8071                         continue;
8072
8073                 if (masks[i] == -1ULL)
8074                         sw_stats[i] = hw;
8075                 else
8076                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8077         }
8078 }
8079
8080 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8081 {
8082         if (!stats->hw_stats)
8083                 return;
8084
8085         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8086                                 stats->hw_masks, stats->len / 8, false);
8087 }
8088
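/* Accumulate every hardware statistics block (per-ring, port and extended
 * port stats) into the driver's 64-bit counters.  On P5 chips a zero sample
 * is skipped to work around the chip bug noted below.
 */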
8089 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8090 {
8091         struct bnxt_stats_mem *ring0_stats;
8092         bool ignore_zero = false;
8093         int i;
8094
8095         /* Chip bug.  Counter intermittently becomes 0. */
8096         if (bp->flags & BNXT_FLAG_CHIP_P5)
8097                 ignore_zero = true;
8098
8099         for (i = 0; i < bp->cp_nr_rings; i++) {
8100                 struct bnxt_napi *bnapi = bp->bnapi[i];
8101                 struct bnxt_cp_ring_info *cpr;
8102                 struct bnxt_stats_mem *stats;
8103
8104                 cpr = &bnapi->cp_ring;
8105                 stats = &cpr->stats;
8106                 if (!i)
8107                         ring0_stats = stats;
8108                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8109                                         ring0_stats->hw_masks,
8110                                         ring0_stats->len / 8, ignore_zero);
8111         }
8112         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8113                 struct bnxt_stats_mem *stats = &bp->port_stats;
8114                 __le64 *hw_stats = stats->hw_stats;
8115                 u64 *sw_stats = stats->sw_stats;
8116                 u64 *masks = stats->hw_masks;
8117                 int cnt;
8118
8119                 cnt = sizeof(struct rx_port_stats) / 8;
8120                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8121
8122                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8123                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8124                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8125                 cnt = sizeof(struct tx_port_stats) / 8;
8126                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8127         }
8128         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8129                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8130                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8131         }
8132 }
8133
8134 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8135 {
8136         struct hwrm_port_qstats_input *req;
8137         struct bnxt_pf_info *pf = &bp->pf;
8138         int rc;
8139
8140         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8141                 return 0;
8142
8143         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8144                 return -EOPNOTSUPP;
8145
8146         rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8147         if (rc)
8148                 return rc;
8149
8150         req->flags = flags;
8151         req->port_id = cpu_to_le16(pf->port_id);
8152         req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8153                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8154         req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8155         return hwrm_req_send(bp, req);
8156 }
8157
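/* DMA the extended RX/TX port statistics and record how much of each block
 * the firmware filled in.  For a plain query (no @flags), the priority to
 * CoS queue mapping is also refreshed when the TX block is large enough to
 * cover the per-priority counters.
 */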
8158 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8159 {
8160         struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8161         struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8162         struct hwrm_port_qstats_ext_output *resp_qs;
8163         struct hwrm_port_qstats_ext_input *req_qs;
8164         struct bnxt_pf_info *pf = &bp->pf;
8165         u32 tx_stat_size;
8166         int rc;
8167
8168         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8169                 return 0;
8170
8171         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8172                 return -EOPNOTSUPP;
8173
8174         rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8175         if (rc)
8176                 return rc;
8177
8178         req_qs->flags = flags;
8179         req_qs->port_id = cpu_to_le16(pf->port_id);
8180         req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8181         req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8182         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8183                        sizeof(struct tx_port_stats_ext) : 0;
8184         req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8185         req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8186         resp_qs = hwrm_req_hold(bp, req_qs);
8187         rc = hwrm_req_send(bp, req_qs);
8188         if (!rc) {
8189                 bp->fw_rx_stats_ext_size =
8190                         le16_to_cpu(resp_qs->rx_stat_size) / 8;
8191                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8192                         le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8193         } else {
8194                 bp->fw_rx_stats_ext_size = 0;
8195                 bp->fw_tx_stats_ext_size = 0;
8196         }
8197         hwrm_req_drop(bp, req_qs);
8198
8199         if (flags)
8200                 return rc;
8201
8202         if (bp->fw_tx_stats_ext_size <=
8203             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8204                 bp->pri2cos_valid = 0;
8205                 return rc;
8206         }
8207
8208         rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8209         if (rc)
8210                 return rc;
8211
8212         req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8213
8214         resp_qc = hwrm_req_hold(bp, req_qc);
8215         rc = hwrm_req_send(bp, req_qc);
8216         if (!rc) {
8217                 u8 *pri2cos;
8218                 int i, j;
8219
8220                 pri2cos = &resp_qc->pri0_cos_queue_id;
8221                 for (i = 0; i < 8; i++) {
8222                         u8 queue_id = pri2cos[i];
8223                         u8 queue_idx;
8224
8225                         /* Per port queue IDs start from 0, 10, 20, etc */
8226                         queue_idx = queue_id % 10;
8227                         if (queue_idx > BNXT_MAX_QUEUE) {
8228                                 bp->pri2cos_valid = false;
8229                                 hwrm_req_drop(bp, req_qc);
8230                                 return rc;
8231                         }
8232                         for (j = 0; j < bp->max_q; j++) {
8233                                 if (bp->q_ids[j] == queue_id)
8234                                         bp->pri2cos_idx[i] = queue_idx;
8235                         }
8236                 }
8237                 bp->pri2cos_valid = true;
8238         }
8239         hwrm_req_drop(bp, req_qc);
8240
8241         return rc;
8242 }
8243
8244 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8245 {
8246         bnxt_hwrm_tunnel_dst_port_free(bp,
8247                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8248         bnxt_hwrm_tunnel_dst_port_free(bp,
8249                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8250 }
8251
8252 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8253 {
8254         int rc, i;
8255         u32 tpa_flags = 0;
8256
8257         if (set_tpa)
8258                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8259         else if (BNXT_NO_FW_ACCESS(bp))
8260                 return 0;
8261         for (i = 0; i < bp->nr_vnics; i++) {
8262                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8263                 if (rc) {
8264                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
8265                                    i, rc);
8266                         return rc;
8267                 }
8268         }
8269         return 0;
8270 }
8271
8272 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8273 {
8274         int i;
8275
8276         for (i = 0; i < bp->nr_vnics; i++)
8277                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8278 }
8279
8280 static void bnxt_clear_vnic(struct bnxt *bp)
8281 {
8282         if (!bp->vnic_info)
8283                 return;
8284
8285         bnxt_hwrm_clear_vnic_filter(bp);
8286         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8287                 /* clear all RSS settings before freeing the vnic ctx */
8288                 bnxt_hwrm_clear_vnic_rss(bp);
8289                 bnxt_hwrm_vnic_ctx_free(bp);
8290         }
8291         /* before freeing the vnic, undo the vnic tpa settings */
8292         if (bp->flags & BNXT_FLAG_TPA)
8293                 bnxt_set_tpa(bp, false);
8294         bnxt_hwrm_vnic_free(bp);
8295         if (bp->flags & BNXT_FLAG_CHIP_P5)
8296                 bnxt_hwrm_vnic_ctx_free(bp);
8297 }
8298
8299 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8300                                     bool irq_re_init)
8301 {
8302         bnxt_clear_vnic(bp);
8303         bnxt_hwrm_ring_free(bp, close_path);
8304         bnxt_hwrm_ring_grp_free(bp);
8305         if (irq_re_init) {
8306                 bnxt_hwrm_stat_ctx_free(bp);
8307                 bnxt_hwrm_free_tunnel_ports(bp);
8308         }
8309 }
8310
8311 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8312 {
8313         struct hwrm_func_cfg_input *req;
8314         u8 evb_mode;
8315         int rc;
8316
8317         if (br_mode == BRIDGE_MODE_VEB)
8318                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8319         else if (br_mode == BRIDGE_MODE_VEPA)
8320                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8321         else
8322                 return -EINVAL;
8323
8324         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8325         if (rc)
8326                 return rc;
8327
8328         req->fid = cpu_to_le16(0xffff);
8329         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8330         req->evb_mode = evb_mode;
8331         return hwrm_req_send(bp, req);
8332 }
8333
8334 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8335 {
8336         struct hwrm_func_cfg_input *req;
8337         int rc;
8338
8339         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8340                 return 0;
8341
8342         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8343         if (rc)
8344                 return rc;
8345
8346         req->fid = cpu_to_le16(0xffff);
8347         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8348         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8349         if (size == 128)
8350                 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8351
8352         return hwrm_req_send(bp, req);
8353 }
8354
8355 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8356 {
8357         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8358         int rc;
8359
8360         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8361                 goto skip_rss_ctx;
8362
8363         /* allocate context for vnic */
8364         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8365         if (rc) {
8366                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8367                            vnic_id, rc);
8368                 goto vnic_setup_err;
8369         }
8370         bp->rsscos_nr_ctxs++;
8371
8372         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8373                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8374                 if (rc) {
8375                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8376                                    vnic_id, rc);
8377                         goto vnic_setup_err;
8378                 }
8379                 bp->rsscos_nr_ctxs++;
8380         }
8381
8382 skip_rss_ctx:
8383         /* configure default vnic, ring grp */
8384         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8385         if (rc) {
8386                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8387                            vnic_id, rc);
8388                 goto vnic_setup_err;
8389         }
8390
8391         /* Enable RSS hashing on vnic */
8392         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8393         if (rc) {
8394                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8395                            vnic_id, rc);
8396                 goto vnic_setup_err;
8397         }
8398
8399         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8400                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8401                 if (rc) {
8402                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8403                                    vnic_id, rc);
8404                 }
8405         }
8406
8407 vnic_setup_err:
8408         return rc;
8409 }
8410
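/* P5 chips: allocate the RSS contexts required for the RX ring count,
 * program the RSS table, then configure the VNIC and, when aggregation
 * rings are in use, header-data split.
 */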
8411 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8412 {
8413         int rc, i, nr_ctxs;
8414
8415         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8416         for (i = 0; i < nr_ctxs; i++) {
8417                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8418                 if (rc) {
8419                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8420                                    vnic_id, i, rc);
8421                         break;
8422                 }
8423                 bp->rsscos_nr_ctxs++;
8424         }
8425         if (i < nr_ctxs)
8426                 return -ENOMEM;
8427
8428         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8429         if (rc) {
8430                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8431                            vnic_id, rc);
8432                 return rc;
8433         }
8434         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8435         if (rc) {
8436                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8437                            vnic_id, rc);
8438                 return rc;
8439         }
8440         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8441                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8442                 if (rc) {
8443                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8444                                    vnic_id, rc);
8445                 }
8446         }
8447         return rc;
8448 }
8449
8450 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8451 {
8452         if (bp->flags & BNXT_FLAG_CHIP_P5)
8453                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8454         else
8455                 return __bnxt_setup_vnic(bp, vnic_id);
8456 }
8457
8458 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8459 {
8460 #ifdef CONFIG_RFS_ACCEL
8461         int i, rc = 0;
8462
8463         if (bp->flags & BNXT_FLAG_CHIP_P5)
8464                 return 0;
8465
8466         for (i = 0; i < bp->rx_nr_rings; i++) {
8467                 struct bnxt_vnic_info *vnic;
8468                 u16 vnic_id = i + 1;
8469                 u16 ring_id = i;
8470
8471                 if (vnic_id >= bp->nr_vnics)
8472                         break;
8473
8474                 vnic = &bp->vnic_info[vnic_id];
8475                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8476                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8477                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8478                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8479                 if (rc) {
8480                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8481                                    vnic_id, rc);
8482                         break;
8483                 }
8484                 rc = bnxt_setup_vnic(bp, vnic_id);
8485                 if (rc)
8486                         break;
8487         }
8488         return rc;
8489 #else
8490         return 0;
8491 #endif
8492 }
8493
8494 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8495 static bool bnxt_promisc_ok(struct bnxt *bp)
8496 {
8497 #ifdef CONFIG_BNXT_SRIOV
8498         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8499                 return false;
8500 #endif
8501         return true;
8502 }
8503
8504 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8505 {
8506         int rc = 0;
8507
8508         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8509         if (rc) {
8510                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8511                            rc);
8512                 return rc;
8513         }
8514
8515         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8516         if (rc) {
8517                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8518                            rc);
8519                 return rc;
8520         }
8521         return rc;
8522 }
8523
8524 static int bnxt_cfg_rx_mode(struct bnxt *);
8525 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8526
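/* Allocate and configure the chip resources: stat contexts (when IRQs
 * are re-initialized), rings, ring groups, the default VNIC plus any
 * RFS VNICs, TPA, the default MAC filter and the RX mask.  On failure,
 * everything allocated so far is freed via bnxt_hwrm_resource_free().
 */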
8527 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8528 {
8529         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8530         int rc = 0;
8531         unsigned int rx_nr_rings = bp->rx_nr_rings;
8532
8533         if (irq_re_init) {
8534                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8535                 if (rc) {
8536                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8537                                    rc);
8538                         goto err_out;
8539                 }
8540         }
8541
8542         rc = bnxt_hwrm_ring_alloc(bp);
8543         if (rc) {
8544                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8545                 goto err_out;
8546         }
8547
8548         rc = bnxt_hwrm_ring_grp_alloc(bp);
8549         if (rc) {
8550                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8551                 goto err_out;
8552         }
8553
8554         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8555                 rx_nr_rings--;
8556
8557         /* default vnic 0 */
8558         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8559         if (rc) {
8560                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8561                 goto err_out;
8562         }
8563
8564         rc = bnxt_setup_vnic(bp, 0);
8565         if (rc)
8566                 goto err_out;
8567
8568         if (bp->flags & BNXT_FLAG_RFS) {
8569                 rc = bnxt_alloc_rfs_vnics(bp);
8570                 if (rc)
8571                         goto err_out;
8572         }
8573
8574         if (bp->flags & BNXT_FLAG_TPA) {
8575                 rc = bnxt_set_tpa(bp, true);
8576                 if (rc)
8577                         goto err_out;
8578         }
8579
8580         if (BNXT_VF(bp))
8581                 bnxt_update_vf_mac(bp);
8582
8583         /* Filter for default vnic 0 */
8584         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8585         if (rc) {
8586                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8587                 goto err_out;
8588         }
8589         vnic->uc_filter_count = 1;
8590
8591         vnic->rx_mask = 0;
8592         if (bp->dev->flags & IFF_BROADCAST)
8593                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8594
8595         if (bp->dev->flags & IFF_PROMISC)
8596                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8597
8598         if (bp->dev->flags & IFF_ALLMULTI) {
8599                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8600                 vnic->mc_list_count = 0;
8601         } else {
8602                 u32 mask = 0;
8603
8604                 bnxt_mc_list_updated(bp, &mask);
8605                 vnic->rx_mask |= mask;
8606         }
8607
8608         rc = bnxt_cfg_rx_mode(bp);
8609         if (rc)
8610                 goto err_out;
8611
8612         rc = bnxt_hwrm_set_coal(bp);
8613         if (rc)
8614                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8615                                 rc);
8616
8617         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8618                 rc = bnxt_setup_nitroa0_vnic(bp);
8619                 if (rc)
8620                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8621                                    rc);
8622         }
8623
8624         if (BNXT_VF(bp)) {
8625                 bnxt_hwrm_func_qcfg(bp);
8626                 netdev_update_features(bp->dev);
8627         }
8628
8629         return 0;
8630
8631 err_out:
8632         bnxt_hwrm_resource_free(bp, 0, true);
8633
8634         return rc;
8635 }
8636
8637 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8638 {
8639         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8640         return 0;
8641 }
8642
8643 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8644 {
8645         bnxt_init_cp_rings(bp);
8646         bnxt_init_rx_rings(bp);
8647         bnxt_init_tx_rings(bp);
8648         bnxt_init_ring_grps(bp, irq_re_init);
8649         bnxt_init_vnics(bp);
8650
8651         return bnxt_init_chip(bp, irq_re_init);
8652 }
8653
8654 static int bnxt_set_real_num_queues(struct bnxt *bp)
8655 {
8656         int rc;
8657         struct net_device *dev = bp->dev;
8658
8659         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8660                                           bp->tx_nr_rings_xdp);
8661         if (rc)
8662                 return rc;
8663
8664         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8665         if (rc)
8666                 return rc;
8667
8668 #ifdef CONFIG_RFS_ACCEL
8669         if (bp->flags & BNXT_FLAG_RFS)
8670                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8671 #endif
8672
8673         return rc;
8674 }
8675
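/* Trim the requested RX and TX ring counts so they fit within @max.
 * With shared completion rings each count is simply capped at @max;
 * otherwise the larger of the two counts is decremented until the sum
 * fits, keeping at least one ring of each type.
 */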
8676 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8677                            bool shared)
8678 {
8679         int _rx = *rx, _tx = *tx;
8680
8681         if (shared) {
8682                 *rx = min_t(int, _rx, max);
8683                 *tx = min_t(int, _tx, max);
8684         } else {
8685                 if (max < 2)
8686                         return -ENOMEM;
8687
8688                 while (_rx + _tx > max) {
8689                         if (_rx > _tx && _rx > 1)
8690                                 _rx--;
8691                         else if (_tx > 1)
8692                                 _tx--;
8693                 }
8694                 *rx = _rx;
8695                 *tx = _tx;
8696         }
8697         return 0;
8698 }
8699
8700 static void bnxt_setup_msix(struct bnxt *bp)
8701 {
8702         const int len = sizeof(bp->irq_tbl[0].name);
8703         struct net_device *dev = bp->dev;
8704         int tcs, i;
8705
8706         tcs = netdev_get_num_tc(dev);
8707         if (tcs) {
8708                 int i, off, count;
8709
8710                 for (i = 0; i < tcs; i++) {
8711                         count = bp->tx_nr_rings_per_tc;
8712                         off = i * count;
8713                         netdev_set_tc_queue(dev, i, count, off);
8714                 }
8715         }
8716
8717         for (i = 0; i < bp->cp_nr_rings; i++) {
8718                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8719                 char *attr;
8720
8721                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8722                         attr = "TxRx";
8723                 else if (i < bp->rx_nr_rings)
8724                         attr = "rx";
8725                 else
8726                         attr = "tx";
8727
8728                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8729                          attr, i);
8730                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8731         }
8732 }
8733
8734 static void bnxt_setup_inta(struct bnxt *bp)
8735 {
8736         const int len = sizeof(bp->irq_tbl[0].name);
8737
8738         if (netdev_get_num_tc(bp->dev))
8739                 netdev_reset_tc(bp->dev);
8740
8741         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8742                  0);
8743         bp->irq_tbl[0].handler = bnxt_inta;
8744 }
8745
8746 static int bnxt_init_int_mode(struct bnxt *bp);
8747
8748 static int bnxt_setup_int_mode(struct bnxt *bp)
8749 {
8750         int rc;
8751
8752         if (!bp->irq_tbl) {
8753                 rc = bnxt_init_int_mode(bp);
8754                 if (rc || !bp->irq_tbl)
8755                         return rc ?: -ENODEV;
8756         }
8757
8758         if (bp->flags & BNXT_FLAG_USING_MSIX)
8759                 bnxt_setup_msix(bp);
8760         else
8761                 bnxt_setup_inta(bp);
8762
8763         rc = bnxt_set_real_num_queues(bp);
8764         return rc;
8765 }
8766
8767 #ifdef CONFIG_RFS_ACCEL
8768 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8769 {
8770         return bp->hw_resc.max_rsscos_ctxs;
8771 }
8772
8773 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8774 {
8775         return bp->hw_resc.max_vnics;
8776 }
8777 #endif
8778
8779 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8780 {
8781         return bp->hw_resc.max_stat_ctxs;
8782 }
8783
8784 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8785 {
8786         return bp->hw_resc.max_cp_rings;
8787 }
8788
8789 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8790 {
8791         unsigned int cp = bp->hw_resc.max_cp_rings;
8792
8793         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8794                 cp -= bnxt_get_ulp_msix_num(bp);
8795
8796         return cp;
8797 }
8798
8799 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8800 {
8801         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8802
8803         if (bp->flags & BNXT_FLAG_CHIP_P5)
8804                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8805
8806         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8807 }
8808
8809 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8810 {
8811         bp->hw_resc.max_irqs = max_irqs;
8812 }
8813
8814 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8815 {
8816         unsigned int cp;
8817
8818         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8819         if (bp->flags & BNXT_FLAG_CHIP_P5)
8820                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8821         else
8822                 return cp - bp->cp_nr_rings;
8823 }
8824
8825 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8826 {
8827         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8828 }
8829
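/* Return how many MSI-X vectors, up to @num, can still be handed out
 * beyond those used by the existing completion rings, honouring both
 * the number of vectors enabled and the firmware IRQ limit.
 */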
8830 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8831 {
8832         int max_cp = bnxt_get_max_func_cp_rings(bp);
8833         int max_irq = bnxt_get_max_func_irqs(bp);
8834         int total_req = bp->cp_nr_rings + num;
8835         int max_idx, avail_msix;
8836
8837         max_idx = bp->total_irqs;
8838         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8839                 max_idx = min_t(int, bp->total_irqs, max_cp);
8840         avail_msix = max_idx - bp->cp_nr_rings;
8841         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8842                 return avail_msix;
8843
8844         if (max_irq < total_req) {
8845                 num = max_irq - bp->cp_nr_rings;
8846                 if (num <= 0)
8847                         return 0;
8848         }
8849         return num;
8850 }
8851
8852 static int bnxt_get_num_msix(struct bnxt *bp)
8853 {
8854         if (!BNXT_NEW_RM(bp))
8855                 return bnxt_get_max_func_irqs(bp);
8856
8857         return bnxt_nq_rings_in_use(bp);
8858 }
8859
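/* Enable MSI-X with as many vectors as the rings need, bounded by the
 * firmware IRQ limit, then trim the RX/TX ring counts to the vectors
 * actually granted (less any vectors reserved for the ULP driver).
 */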
8860 static int bnxt_init_msix(struct bnxt *bp)
8861 {
8862         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8863         struct msix_entry *msix_ent;
8864
8865         total_vecs = bnxt_get_num_msix(bp);
8866         max = bnxt_get_max_func_irqs(bp);
8867         if (total_vecs > max)
8868                 total_vecs = max;
8869
8870         if (!total_vecs)
8871                 return 0;
8872
8873         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8874         if (!msix_ent)
8875                 return -ENOMEM;
8876
8877         for (i = 0; i < total_vecs; i++) {
8878                 msix_ent[i].entry = i;
8879                 msix_ent[i].vector = 0;
8880         }
8881
8882         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8883                 min = 2;
8884
8885         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8886         ulp_msix = bnxt_get_ulp_msix_num(bp);
8887         if (total_vecs < 0 || total_vecs < ulp_msix) {
8888                 rc = -ENODEV;
8889                 goto msix_setup_exit;
8890         }
8891
8892         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8893         if (bp->irq_tbl) {
8894                 for (i = 0; i < total_vecs; i++)
8895                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8896
8897                 bp->total_irqs = total_vecs;
8898                 /* Trim rings based on the number of vectors allocated */
8899                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8900                                      total_vecs - ulp_msix, min == 1);
8901                 if (rc)
8902                         goto msix_setup_exit;
8903
8904                 bp->cp_nr_rings = (min == 1) ?
8905                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8906                                   bp->tx_nr_rings + bp->rx_nr_rings;
8907
8908         } else {
8909                 rc = -ENOMEM;
8910                 goto msix_setup_exit;
8911         }
8912         bp->flags |= BNXT_FLAG_USING_MSIX;
8913         kfree(msix_ent);
8914         return 0;
8915
8916 msix_setup_exit:
8917         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8918         kfree(bp->irq_tbl);
8919         bp->irq_tbl = NULL;
8920         pci_disable_msix(bp->pdev);
8921         kfree(msix_ent);
8922         return rc;
8923 }
8924
8925 static int bnxt_init_inta(struct bnxt *bp)
8926 {
8927         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8928         if (!bp->irq_tbl)
8929                 return -ENOMEM;
8930
8931         bp->total_irqs = 1;
8932         bp->rx_nr_rings = 1;
8933         bp->tx_nr_rings = 1;
8934         bp->cp_nr_rings = 1;
8935         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8936         bp->irq_tbl[0].vector = bp->pdev->irq;
8937         return 0;
8938 }
8939
8940 static int bnxt_init_int_mode(struct bnxt *bp)
8941 {
8942         int rc = -ENODEV;
8943
8944         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8945                 rc = bnxt_init_msix(bp);
8946
8947         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8948                 /* fall back to INTA */
8949                 rc = bnxt_init_inta(bp);
8950         }
8951         return rc;
8952 }
8953
8954 static void bnxt_clear_int_mode(struct bnxt *bp)
8955 {
8956         if (bp->flags & BNXT_FLAG_USING_MSIX)
8957                 pci_disable_msix(bp->pdev);
8958
8959         kfree(bp->irq_tbl);
8960         bp->irq_tbl = NULL;
8961         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8962 }
8963
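/* Reserve rings with the firmware.  If the number of MSI-X vectors in
 * use must change, the interrupt mode is torn down and re-initialized
 * around the reservation, with ULP IRQs stopped and restarted.
 */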
8964 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8965 {
8966         int tcs = netdev_get_num_tc(bp->dev);
8967         bool irq_cleared = false;
8968         int rc;
8969
8970         if (!bnxt_need_reserve_rings(bp))
8971                 return 0;
8972
8973         if (irq_re_init && BNXT_NEW_RM(bp) &&
8974             bnxt_get_num_msix(bp) != bp->total_irqs) {
8975                 bnxt_ulp_irq_stop(bp);
8976                 bnxt_clear_int_mode(bp);
8977                 irq_cleared = true;
8978         }
8979         rc = __bnxt_reserve_rings(bp);
8980         if (irq_cleared) {
8981                 if (!rc)
8982                         rc = bnxt_init_int_mode(bp);
8983                 bnxt_ulp_irq_restart(bp, rc);
8984         }
8985         if (rc) {
8986                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8987                 return rc;
8988         }
8989         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8990                 netdev_err(bp->dev, "tx ring reservation failure\n");
8991                 netdev_reset_tc(bp->dev);
8992                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8993                 return -ENOMEM;
8994         }
8995         return 0;
8996 }
8997
8998 static void bnxt_free_irq(struct bnxt *bp)
8999 {
9000         struct bnxt_irq *irq;
9001         int i;
9002
9003 #ifdef CONFIG_RFS_ACCEL
9004         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9005         bp->dev->rx_cpu_rmap = NULL;
9006 #endif
9007         if (!bp->irq_tbl || !bp->bnapi)
9008                 return;
9009
9010         for (i = 0; i < bp->cp_nr_rings; i++) {
9011                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9012
9013                 irq = &bp->irq_tbl[map_idx];
9014                 if (irq->requested) {
9015                         if (irq->have_cpumask) {
9016                                 irq_set_affinity_hint(irq->vector, NULL);
9017                                 free_cpumask_var(irq->cpu_mask);
9018                                 irq->have_cpumask = 0;
9019                         }
9020                         free_irq(irq->vector, bp->bnapi[i]);
9021                 }
9022
9023                 irq->requested = 0;
9024         }
9025 }
9026
9027 static int bnxt_request_irq(struct bnxt *bp)
9028 {
9029         int i, j, rc = 0;
9030         unsigned long flags = 0;
9031 #ifdef CONFIG_RFS_ACCEL
9032         struct cpu_rmap *rmap;
9033 #endif
9034
9035         rc = bnxt_setup_int_mode(bp);
9036         if (rc) {
9037                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9038                            rc);
9039                 return rc;
9040         }
9041 #ifdef CONFIG_RFS_ACCEL
9042         rmap = bp->dev->rx_cpu_rmap;
9043 #endif
9044         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9045                 flags = IRQF_SHARED;
9046
9047         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9048                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9049                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9050
9051 #ifdef CONFIG_RFS_ACCEL
9052                 if (rmap && bp->bnapi[i]->rx_ring) {
9053                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9054                         if (rc)
9055                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9056                                             j);
9057                         j++;
9058                 }
9059 #endif
9060                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9061                                  bp->bnapi[i]);
9062                 if (rc)
9063                         break;
9064
9065                 irq->requested = 1;
9066
9067                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9068                         int numa_node = dev_to_node(&bp->pdev->dev);
9069
9070                         irq->have_cpumask = 1;
9071                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9072                                         irq->cpu_mask);
9073                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9074                         if (rc) {
9075                                 netdev_warn(bp->dev,
9076                                             "Set affinity failed, IRQ = %d\n",
9077                                             irq->vector);
9078                                 break;
9079                         }
9080                 }
9081         }
9082         return rc;
9083 }
9084
9085 static void bnxt_del_napi(struct bnxt *bp)
9086 {
9087         int i;
9088
9089         if (!bp->bnapi)
9090                 return;
9091
9092         for (i = 0; i < bp->cp_nr_rings; i++) {
9093                 struct bnxt_napi *bnapi = bp->bnapi[i];
9094
9095                 __netif_napi_del(&bnapi->napi);
9096         }
9097         /* We called __netif_napi_del(), so we need to respect an RCU
9098          * grace period before freeing the napi structures.
9099          */
9100         synchronize_net();
9101 }
9102
9103 static void bnxt_init_napi(struct bnxt *bp)
9104 {
9105         int i;
9106         unsigned int cp_nr_rings = bp->cp_nr_rings;
9107         struct bnxt_napi *bnapi;
9108
9109         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9110                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9111
9112                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9113                         poll_fn = bnxt_poll_p5;
9114                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9115                         cp_nr_rings--;
9116                 for (i = 0; i < cp_nr_rings; i++) {
9117                         bnapi = bp->bnapi[i];
9118                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9119                 }
9120                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9121                         bnapi = bp->bnapi[cp_nr_rings];
9122                         netif_napi_add(bp->dev, &bnapi->napi,
9123                                        bnxt_poll_nitroa0, 64);
9124                 }
9125         } else {
9126                 bnapi = bp->bnapi[0];
9127                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9128         }
9129 }
9130
9131 static void bnxt_disable_napi(struct bnxt *bp)
9132 {
9133         int i;
9134
9135         if (!bp->bnapi ||
9136             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9137                 return;
9138
9139         for (i = 0; i < bp->cp_nr_rings; i++) {
9140                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9141
9142                 napi_disable(&bp->bnapi[i]->napi);
9143                 if (bp->bnapi[i]->rx_ring)
9144                         cancel_work_sync(&cpr->dim.work);
9145         }
9146 }
9147
9148 static void bnxt_enable_napi(struct bnxt *bp)
9149 {
9150         int i;
9151
9152         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9153         for (i = 0; i < bp->cp_nr_rings; i++) {
9154                 struct bnxt_napi *bnapi = bp->bnapi[i];
9155                 struct bnxt_cp_ring_info *cpr;
9156
9157                 cpr = &bnapi->cp_ring;
9158                 if (bnapi->in_reset)
9159                         cpr->sw_stats.rx.rx_resets++;
9160                 bnapi->in_reset = false;
9161
9162                 if (bnapi->rx_ring) {
9163                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9164                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9165                 }
9166                 napi_enable(&bnapi->napi);
9167         }
9168 }
9169
9170 void bnxt_tx_disable(struct bnxt *bp)
9171 {
9172         int i;
9173         struct bnxt_tx_ring_info *txr;
9174
9175         if (bp->tx_ring) {
9176                 for (i = 0; i < bp->tx_nr_rings; i++) {
9177                         txr = &bp->tx_ring[i];
9178                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9179                 }
9180         }
9181         /* Make sure napi polls see @dev_state change */
9182         synchronize_net();
9183         /* Drop carrier first to prevent TX timeout */
9184         netif_carrier_off(bp->dev);
9185         /* Stop all TX queues */
9186         netif_tx_disable(bp->dev);
9187 }
9188
9189 void bnxt_tx_enable(struct bnxt *bp)
9190 {
9191         int i;
9192         struct bnxt_tx_ring_info *txr;
9193
9194         for (i = 0; i < bp->tx_nr_rings; i++) {
9195                 txr = &bp->tx_ring[i];
9196                 WRITE_ONCE(txr->dev_state, 0);
9197         }
9198         /* Make sure napi polls see @dev_state change */
9199         synchronize_net();
9200         netif_tx_wake_all_queues(bp->dev);
9201         if (bp->link_info.link_up)
9202                 netif_carrier_on(bp->dev);
9203 }
9204
9205 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9206 {
9207         u8 active_fec = link_info->active_fec_sig_mode &
9208                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9209
9210         switch (active_fec) {
9211         default:
9212         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9213                 return "None";
9214         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9215                 return "Clause 74 BaseR";
9216         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9217                 return "Clause 91 RS(528,514)";
9218         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9219                 return "Clause 91 RS544_1XN";
9220         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9221                 return "Clause 91 RS(544,514)";
9222         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9223                 return "Clause 91 RS272_1XN";
9224         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9225                 return "Clause 91 RS(272,257)";
9226         }
9227 }
9228
9229 static void bnxt_report_link(struct bnxt *bp)
9230 {
9231         if (bp->link_info.link_up) {
9232                 const char *signal = "";
9233                 const char *flow_ctrl;
9234                 const char *duplex;
9235                 u32 speed;
9236                 u16 fec;
9237
9238                 netif_carrier_on(bp->dev);
9239                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9240                 if (speed == SPEED_UNKNOWN) {
9241                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9242                         return;
9243                 }
9244                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9245                         duplex = "full";
9246                 else
9247                         duplex = "half";
9248                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9249                         flow_ctrl = "ON - receive & transmit";
9250                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9251                         flow_ctrl = "ON - transmit";
9252                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9253                         flow_ctrl = "ON - receive";
9254                 else
9255                         flow_ctrl = "none";
9256                 if (bp->link_info.phy_qcfg_resp.option_flags &
9257                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9258                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9259                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9260                         switch (sig_mode) {
9261                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9262                                 signal = "(NRZ) ";
9263                                 break;
9264                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9265                                 signal = "(PAM4) ";
9266                                 break;
9267                         default:
9268                                 break;
9269                         }
9270                 }
9271                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9272                             speed, signal, duplex, flow_ctrl);
9273                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9274                         netdev_info(bp->dev, "EEE is %s\n",
9275                                     bp->eee.eee_active ? "active" :
9276                                                          "not active");
9277                 fec = bp->link_info.fec_cfg;
9278                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9279                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9280                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9281                                     bnxt_report_fec(&bp->link_info));
9282         } else {
9283                 netif_carrier_off(bp->dev);
9284                 netdev_err(bp->dev, "NIC Link is Down\n");
9285         }
9286 }
9287
9288 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9289 {
9290         if (!resp->supported_speeds_auto_mode &&
9291             !resp->supported_speeds_force_mode &&
9292             !resp->supported_pam4_speeds_auto_mode &&
9293             !resp->supported_pam4_speeds_force_mode)
9294                 return true;
9295         return false;
9296 }
9297
9298 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9299 {
9300         struct bnxt_link_info *link_info = &bp->link_info;
9301         struct hwrm_port_phy_qcaps_output *resp;
9302         struct hwrm_port_phy_qcaps_input *req;
9303         int rc = 0;
9304
9305         if (bp->hwrm_spec_code < 0x10201)
9306                 return 0;
9307
9308         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9309         if (rc)
9310                 return rc;
9311
9312         resp = hwrm_req_hold(bp, req);
9313         rc = hwrm_req_send(bp, req);
9314         if (rc)
9315                 goto hwrm_phy_qcaps_exit;
9316
9317         bp->phy_flags = resp->flags;
9318         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9319                 struct ethtool_eee *eee = &bp->eee;
9320                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9321
9322                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9323                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9324                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9325                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9326                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9327         }
9328
9329         if (bp->hwrm_spec_code >= 0x10a01) {
9330                 if (bnxt_phy_qcaps_no_speed(resp)) {
9331                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9332                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9333                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9334                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9335                         netdev_info(bp->dev, "Ethernet link enabled\n");
9336                         /* Phy re-enabled, reprobe the speeds */
9337                         link_info->support_auto_speeds = 0;
9338                         link_info->support_pam4_auto_speeds = 0;
9339                 }
9340         }
9341         if (resp->supported_speeds_auto_mode)
9342                 link_info->support_auto_speeds =
9343                         le16_to_cpu(resp->supported_speeds_auto_mode);
9344         if (resp->supported_pam4_speeds_auto_mode)
9345                 link_info->support_pam4_auto_speeds =
9346                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9347
9348         bp->port_count = resp->port_cnt;
9349
9350 hwrm_phy_qcaps_exit:
9351         hwrm_req_drop(bp, req);
9352         return rc;
9353 }
9354
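/* Return true if @advertising contains any speed bit that is not set
 * in @supported.
 */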
9355 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9356 {
9357         u16 diff = advertising ^ supported;
9358
9359         return ((supported | diff) != supported);
9360 }
9361
9362 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9363 {
9364         struct bnxt_link_info *link_info = &bp->link_info;
9365         struct hwrm_port_phy_qcfg_output *resp;
9366         struct hwrm_port_phy_qcfg_input *req;
9367         u8 link_up = link_info->link_up;
9368         bool support_changed = false;
9369         int rc;
9370
9371         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9372         if (rc)
9373                 return rc;
9374
9375         resp = hwrm_req_hold(bp, req);
9376         rc = hwrm_req_send(bp, req);
9377         if (rc) {
9378                 hwrm_req_drop(bp, req);
9379                 return rc;
9380         }
9381
9382         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9383         link_info->phy_link_status = resp->link;
9384         link_info->duplex = resp->duplex_cfg;
9385         if (bp->hwrm_spec_code >= 0x10800)
9386                 link_info->duplex = resp->duplex_state;
9387         link_info->pause = resp->pause;
9388         link_info->auto_mode = resp->auto_mode;
9389         link_info->auto_pause_setting = resp->auto_pause;
9390         link_info->lp_pause = resp->link_partner_adv_pause;
9391         link_info->force_pause_setting = resp->force_pause;
9392         link_info->duplex_setting = resp->duplex_cfg;
9393         if (link_info->phy_link_status == BNXT_LINK_LINK)
9394                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9395         else
9396                 link_info->link_speed = 0;
9397         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9398         link_info->force_pam4_link_speed =
9399                 le16_to_cpu(resp->force_pam4_link_speed);
9400         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9401         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9402         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9403         link_info->auto_pam4_link_speeds =
9404                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9405         link_info->lp_auto_link_speeds =
9406                 le16_to_cpu(resp->link_partner_adv_speeds);
9407         link_info->lp_auto_pam4_link_speeds =
9408                 resp->link_partner_pam4_adv_speeds;
9409         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9410         link_info->phy_ver[0] = resp->phy_maj;
9411         link_info->phy_ver[1] = resp->phy_min;
9412         link_info->phy_ver[2] = resp->phy_bld;
9413         link_info->media_type = resp->media_type;
9414         link_info->phy_type = resp->phy_type;
9415         link_info->transceiver = resp->xcvr_pkg_type;
9416         link_info->phy_addr = resp->eee_config_phy_addr &
9417                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9418         link_info->module_status = resp->module_status;
9419
9420         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9421                 struct ethtool_eee *eee = &bp->eee;
9422                 u16 fw_speeds;
9423
9424                 eee->eee_active = 0;
9425                 if (resp->eee_config_phy_addr &
9426                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9427                         eee->eee_active = 1;
9428                         fw_speeds = le16_to_cpu(
9429                                 resp->link_partner_adv_eee_link_speed_mask);
9430                         eee->lp_advertised =
9431                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9432                 }
9433
9434                 /* Pull initial EEE config */
9435                 if (!chng_link_state) {
9436                         if (resp->eee_config_phy_addr &
9437                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9438                                 eee->eee_enabled = 1;
9439
9440                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9441                         eee->advertised =
9442                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9443
9444                         if (resp->eee_config_phy_addr &
9445                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9446                                 __le32 tmr;
9447
9448                                 eee->tx_lpi_enabled = 1;
9449                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9450                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9451                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9452                         }
9453                 }
9454         }
9455
9456         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9457         if (bp->hwrm_spec_code >= 0x10504) {
9458                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9459                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9460         }
9461         /* TODO: need to add more logic to report VF link */
9462         if (chng_link_state) {
9463                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9464                         link_info->link_up = 1;
9465                 else
9466                         link_info->link_up = 0;
9467                 if (link_up != link_info->link_up)
9468                         bnxt_report_link(bp);
9469         } else {
9470                 /* always report link down if not asked to update the link state */
9471                 link_info->link_up = 0;
9472         }
9473         hwrm_req_drop(bp, req);
9474
9475         if (!BNXT_PHY_CFG_ABLE(bp))
9476                 return 0;
9477
9478         /* Check if any advertised speeds are no longer supported. The caller
9479          * holds the link_lock mutex, so we can modify link_info settings.
9480          */
9481         if (bnxt_support_dropped(link_info->advertising,
9482                                  link_info->support_auto_speeds)) {
9483                 link_info->advertising = link_info->support_auto_speeds;
9484                 support_changed = true;
9485         }
9486         if (bnxt_support_dropped(link_info->advertising_pam4,
9487                                  link_info->support_pam4_auto_speeds)) {
9488                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9489                 support_changed = true;
9490         }
9491         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9492                 bnxt_hwrm_set_link_setting(bp, true, false);
9493         return 0;
9494 }
9495
9496 static void bnxt_get_port_module_status(struct bnxt *bp)
9497 {
9498         struct bnxt_link_info *link_info = &bp->link_info;
9499         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9500         u8 module_status;
9501
9502         if (bnxt_update_link(bp, true))
9503                 return;
9504
9505         module_status = link_info->module_status;
9506         switch (module_status) {
9507         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9508         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9509         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9510                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9511                             bp->pf.port_id);
9512                 if (bp->hwrm_spec_code >= 0x10201) {
9513                         netdev_warn(bp->dev, "Module part number %s\n",
9514                                     resp->phy_vendor_partnumber);
9515                 }
9516                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9517                         netdev_warn(bp->dev, "TX is disabled\n");
9518                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9519                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9520         }
9521 }
9522
9523 static void
9524 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9525 {
9526         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9527                 if (bp->hwrm_spec_code >= 0x10201)
9528                         req->auto_pause =
9529                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9530                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9531                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9532                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9533                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9534                 req->enables |=
9535                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9536         } else {
9537                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9538                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9539                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9540                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9541                 req->enables |=
9542                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9543                 if (bp->hwrm_spec_code >= 0x10201) {
9544                         req->auto_pause = req->force_pause;
9545                         req->enables |= cpu_to_le32(
9546                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9547                 }
9548         }
9549 }
9550
9551 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9552 {
9553         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9554                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9555                 if (bp->link_info.advertising) {
9556                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9557                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9558                 }
9559                 if (bp->link_info.advertising_pam4) {
9560                         req->enables |=
9561                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9562                         req->auto_link_pam4_speed_mask =
9563                                 cpu_to_le16(bp->link_info.advertising_pam4);
9564                 }
9565                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9566                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9567         } else {
9568                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9569                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9570                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9571                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9572                 } else {
9573                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9574                 }
9575         }
9576
9577         /* tell chimp that the setting takes effect immediately */
9578         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9579 }
9580
9581 int bnxt_hwrm_set_pause(struct bnxt *bp)
9582 {
9583         struct hwrm_port_phy_cfg_input *req;
9584         int rc;
9585
9586         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9587         if (rc)
9588                 return rc;
9589
9590         bnxt_hwrm_set_pause_common(bp, req);
9591
9592         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9593             bp->link_info.force_link_chng)
9594                 bnxt_hwrm_set_link_common(bp, req);
9595
9596         rc = hwrm_req_send(bp, req);
9597         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9598                 /* since changing the pause setting doesn't trigger any link
9599                  * change event, the driver needs to update the current pause
9600                  * result upon successful return of the phy_cfg command
9601                  */
9602                 bp->link_info.pause =
9603                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9604                 bp->link_info.auto_pause_setting = 0;
9605                 if (!bp->link_info.force_link_chng)
9606                         bnxt_report_link(bp);
9607         }
9608         bp->link_info.force_link_chng = false;
9609         return rc;
9610 }
9611
9612 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9613                               struct hwrm_port_phy_cfg_input *req)
9614 {
9615         struct ethtool_eee *eee = &bp->eee;
9616
9617         if (eee->eee_enabled) {
9618                 u16 eee_speeds;
9619                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9620
9621                 if (eee->tx_lpi_enabled)
9622                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9623                 else
9624                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9625
9626                 req->flags |= cpu_to_le32(flags);
9627                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9628                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9629                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9630         } else {
9631                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9632         }
9633 }
9634
9635 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9636 {
9637         struct hwrm_port_phy_cfg_input *req;
9638         int rc;
9639
9640         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9641         if (rc)
9642                 return rc;
9643
9644         if (set_pause)
9645                 bnxt_hwrm_set_pause_common(bp, req);
9646
9647         bnxt_hwrm_set_link_common(bp, req);
9648
9649         if (set_eee)
9650                 bnxt_hwrm_set_eee(bp, req);
9651         return hwrm_req_send(bp, req);
9652 }
9653
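/* Force the link down when the netdev is closed.  Only done on a
 * single-PF setup, and skipped while VFs are active unless the firmware
 * manages link-down itself (BNXT_PHY_FL_FW_MANAGED_LKDN).
 */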
9654 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9655 {
9656         struct hwrm_port_phy_cfg_input *req;
9657         int rc;
9658
9659         if (!BNXT_SINGLE_PF(bp))
9660                 return 0;
9661
9662         if (pci_num_vf(bp->pdev) &&
9663             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9664                 return 0;
9665
9666         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9667         if (rc)
9668                 return rc;
9669
9670         req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9671         return hwrm_req_send(bp, req);
9672 }
9673
9674 static int bnxt_fw_init_one(struct bnxt *bp);
9675
9676 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9677 {
9678 #ifdef CONFIG_TEE_BNXT_FW
9679         int rc = tee_bnxt_fw_load();
9680
9681         if (rc)
9682                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9683
9684         return rc;
9685 #else
9686         netdev_err(bp->dev, "OP-TEE not supported\n");
9687         return -ENODEV;
9688 #endif
9689 }
9690
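/* Poll the firmware health status register until the firmware is no
 * longer booting or recovering (or BNXT_FW_RETRY attempts expire).  If
 * it is still unhealthy, report failure; if it crashed with no master
 * function left to reset it, fall back to a reset via OP-TEE.
 */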
9691 static int bnxt_try_recover_fw(struct bnxt *bp)
9692 {
9693         if (bp->fw_health && bp->fw_health->status_reliable) {
9694                 int retry = 0, rc;
9695                 u32 sts;
9696
9697                 do {
9698                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9699                         rc = bnxt_hwrm_poll(bp);
9700                         if (!BNXT_FW_IS_BOOTING(sts) &&
9701                             !BNXT_FW_IS_RECOVERING(sts))
9702                                 break;
9703                         retry++;
9704                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9705
9706                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9707                         netdev_err(bp->dev,
9708                                    "Firmware not responding, status: 0x%x\n",
9709                                    sts);
9710                         rc = -ENODEV;
9711                 }
9712                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9713                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9714                         return bnxt_fw_reset_via_optee(bp);
9715                 }
9716                 return rc;
9717         }
9718
9719         return -ENODEV;
9720 }
9721
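/* Notify the firmware that the driver interface is going up or down.
 * On the "up" path the response flags indicate whether resources were
 * changed or a hot firmware reset completed while the interface was
 * down, in which case context memory, interrupt mode and ring
 * reservations are re-initialized before the NIC is opened.
 */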
9722 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9723 {
9724         struct hwrm_func_drv_if_change_output *resp;
9725         struct hwrm_func_drv_if_change_input *req;
9726         bool fw_reset = !bp->irq_tbl;
9727         bool resc_reinit = false;
9728         int rc, retry = 0;
9729         u32 flags = 0;
9730
9731         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9732                 return 0;
9733
9734         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9735         if (rc)
9736                 return rc;
9737
9738         if (up)
9739                 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9740         resp = hwrm_req_hold(bp, req);
9741
9742         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9743         while (retry < BNXT_FW_IF_RETRY) {
9744                 rc = hwrm_req_send(bp, req);
9745                 if (rc != -EAGAIN)
9746                         break;
9747
9748                 msleep(50);
9749                 retry++;
9750         }
9751
9752         if (rc == -EAGAIN) {
9753                 hwrm_req_drop(bp, req);
9754                 return rc;
9755         } else if (!rc) {
9756                 flags = le32_to_cpu(resp->flags);
9757         } else if (up) {
9758                 rc = bnxt_try_recover_fw(bp);
9759                 fw_reset = true;
9760         }
9761         hwrm_req_drop(bp, req);
9762         if (rc)
9763                 return rc;
9764
9765         if (!up) {
9766                 bnxt_inv_fw_health_reg(bp);
9767                 return 0;
9768         }
9769
9770         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9771                 resc_reinit = true;
9772         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9773                 fw_reset = true;
9774         else if (bp->fw_health && !bp->fw_health->status_reliable)
9775                 bnxt_try_map_fw_health_reg(bp);
9776
9777         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9778                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9779                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9780                 return -ENODEV;
9781         }
9782         if (resc_reinit || fw_reset) {
9783                 if (fw_reset) {
9784                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9785                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9786                                 bnxt_ulp_stop(bp);
9787                         bnxt_free_ctx_mem(bp);
9788                         kfree(bp->ctx);
9789                         bp->ctx = NULL;
9790                         bnxt_dcb_free(bp);
9791                         rc = bnxt_fw_init_one(bp);
9792                         if (rc) {
9793                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9794                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9795                                 return rc;
9796                         }
9797                         bnxt_clear_int_mode(bp);
9798                         rc = bnxt_init_int_mode(bp);
9799                         if (rc) {
9800                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9801                                 netdev_err(bp->dev, "init int mode failed\n");
9802                                 return rc;
9803                         }
9804                 }
9805                 if (BNXT_NEW_RM(bp)) {
9806                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9807
9808                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9809                         if (rc)
9810                                 netdev_err(bp->dev, "resc_qcaps failed\n");
9811
9812                         hw_resc->resv_cp_rings = 0;
9813                         hw_resc->resv_stat_ctxs = 0;
9814                         hw_resc->resv_irqs = 0;
9815                         hw_resc->resv_tx_rings = 0;
9816                         hw_resc->resv_rx_rings = 0;
9817                         hw_resc->resv_hw_ring_grps = 0;
9818                         hw_resc->resv_vnics = 0;
9819                         if (!fw_reset) {
9820                                 bp->tx_nr_rings = 0;
9821                                 bp->rx_nr_rings = 0;
9822                         }
9823                 }
9824         }
9825         return rc;
9826 }
9827
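/* Query the port LED capabilities.  The LEDs are cached only if every
 * one of them reports a group id and supports alternate blinking.
 */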
9828 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9829 {
9830         struct hwrm_port_led_qcaps_output *resp;
9831         struct hwrm_port_led_qcaps_input *req;
9832         struct bnxt_pf_info *pf = &bp->pf;
9833         int rc;
9834
9835         bp->num_leds = 0;
9836         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9837                 return 0;
9838
9839         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9840         if (rc)
9841                 return rc;
9842
9843         req->port_id = cpu_to_le16(pf->port_id);
9844         resp = hwrm_req_hold(bp, req);
9845         rc = hwrm_req_send(bp, req);
9846         if (rc) {
9847                 hwrm_req_drop(bp, req);
9848                 return rc;
9849         }
9850         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9851                 int i;
9852
9853                 bp->num_leds = resp->num_leds;
9854                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9855                                                  bp->num_leds);
9856                 for (i = 0; i < bp->num_leds; i++) {
9857                         struct bnxt_led_info *led = &bp->leds[i];
9858                         __le16 caps = led->led_state_caps;
9859
9860                         if (!led->led_group_id ||
9861                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9862                                 bp->num_leds = 0;
9863                                 break;
9864                         }
9865                 }
9866         }
9867         hwrm_req_drop(bp, req);
9868         return 0;
9869 }
9870
9871 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9872 {
9873         struct hwrm_wol_filter_alloc_output *resp;
9874         struct hwrm_wol_filter_alloc_input *req;
9875         int rc;
9876
9877         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9878         if (rc)
9879                 return rc;
9880
9881         req->port_id = cpu_to_le16(bp->pf.port_id);
9882         req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9883         req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9884         memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9885
9886         resp = hwrm_req_hold(bp, req);
9887         rc = hwrm_req_send(bp, req);
9888         if (!rc)
9889                 bp->wol_filter_id = resp->wol_filter_id;
9890         hwrm_req_drop(bp, req);
9891         return rc;
9892 }
9893
9894 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9895 {
9896         struct hwrm_wol_filter_free_input *req;
9897         int rc;
9898
9899         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9900         if (rc)
9901                 return rc;
9902
9903         req->port_id = cpu_to_le16(bp->pf.port_id);
9904         req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9905         req->wol_filter_id = bp->wol_filter_id;
9906
9907         return hwrm_req_send(bp, req);
9908 }
9909
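/* Query one WoL filter entry.  The firmware returns the handle of the
 * next entry, so the caller can walk the whole filter list by feeding
 * each returned handle back in until it reads 0 or 0xffff.
 */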
9910 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9911 {
9912         struct hwrm_wol_filter_qcfg_output *resp;
9913         struct hwrm_wol_filter_qcfg_input *req;
9914         u16 next_handle = 0;
9915         int rc;
9916
9917         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9918         if (rc)
9919                 return rc;
9920
9921         req->port_id = cpu_to_le16(bp->pf.port_id);
9922         req->handle = cpu_to_le16(handle);
9923         resp = hwrm_req_hold(bp, req);
9924         rc = hwrm_req_send(bp, req);
9925         if (!rc) {
9926                 next_handle = le16_to_cpu(resp->next_handle);
9927                 if (next_handle != 0) {
9928                         if (resp->wol_type ==
9929                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9930                                 bp->wol = 1;
9931                                 bp->wol_filter_id = resp->wol_filter_id;
9932                         }
9933                 }
9934         }
9935         hwrm_req_drop(bp, req);
9936         return next_handle;
9937 }
9938
9939 static void bnxt_get_wol_settings(struct bnxt *bp)
9940 {
9941         u16 handle = 0;
9942
9943         bp->wol = 0;
9944         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9945                 return;
9946
9947         do {
9948                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9949         } while (handle && handle != 0xffff);
9950 }
9951
9952 #ifdef CONFIG_BNXT_HWMON
9953 static ssize_t bnxt_show_temp(struct device *dev,
9954                               struct device_attribute *devattr, char *buf)
9955 {
9956         struct hwrm_temp_monitor_query_output *resp;
9957         struct hwrm_temp_monitor_query_input *req;
9958         struct bnxt *bp = dev_get_drvdata(dev);
9959         u32 len = 0;
9960         int rc;
9961
9962         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9963         if (rc)
9964                 return rc;
9965         resp = hwrm_req_hold(bp, req);
9966         rc = hwrm_req_send(bp, req);
9967         if (!rc)
9968                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9969         hwrm_req_drop(bp, req);
9970         if (rc)
9971                 return rc;
9972         return len;
9973 }
9974 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9975
9976 static struct attribute *bnxt_attrs[] = {
9977         &sensor_dev_attr_temp1_input.dev_attr.attr,
9978         NULL
9979 };
9980 ATTRIBUTE_GROUPS(bnxt);
9981
9982 static void bnxt_hwmon_close(struct bnxt *bp)
9983 {
9984         if (bp->hwmon_dev) {
9985                 hwmon_device_unregister(bp->hwmon_dev);
9986                 bp->hwmon_dev = NULL;
9987         }
9988 }
9989
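/* Register the hwmon temperature sensor, but only if the firmware
 * actually answers HWRM_TEMP_MONITOR_QUERY; unregister it again when
 * the query is not permitted or not supported.
 */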
9990 static void bnxt_hwmon_open(struct bnxt *bp)
9991 {
9992         struct hwrm_temp_monitor_query_input *req;
9993         struct pci_dev *pdev = bp->pdev;
9994         int rc;
9995
9996         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9997         if (!rc)
9998                 rc = hwrm_req_send_silent(bp, req);
9999         if (rc == -EACCES || rc == -EOPNOTSUPP) {
10000                 bnxt_hwmon_close(bp);
10001                 return;
10002         }
10003
10004         if (bp->hwmon_dev)
10005                 return;
10006
10007         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10008                                                           DRV_MODULE_NAME, bp,
10009                                                           bnxt_groups);
10010         if (IS_ERR(bp->hwmon_dev)) {
10011                 bp->hwmon_dev = NULL;
10012                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10013         }
10014 }
10015 #else
10016 static void bnxt_hwmon_close(struct bnxt *bp)
10017 {
10018 }
10019
10020 static void bnxt_hwmon_open(struct bnxt *bp)
10021 {
10022 }
10023 #endif
10024
10025 static bool bnxt_eee_config_ok(struct bnxt *bp)
10026 {
10027         struct ethtool_eee *eee = &bp->eee;
10028         struct bnxt_link_info *link_info = &bp->link_info;
10029
10030         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10031                 return true;
10032
10033         if (eee->eee_enabled) {
10034                 u32 advertising =
10035                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10036
10037                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10038                         eee->eee_enabled = 0;
10039                         return false;
10040                 }
10041                 if (eee->advertised & ~advertising) {
10042                         eee->advertised = advertising & eee->supported;
10043                         return false;
10044                 }
10045         }
10046         return true;
10047 }
10048
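/* Compare the requested pause, speed/duplex and EEE settings against
 * what the firmware currently reports, and issue HWRM_PORT_PHY_CFG only
 * for the pieces that actually changed (or to bring the link back up
 * after the previous close shut it down).
 */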
10049 static int bnxt_update_phy_setting(struct bnxt *bp)
10050 {
10051         int rc;
10052         bool update_link = false;
10053         bool update_pause = false;
10054         bool update_eee = false;
10055         struct bnxt_link_info *link_info = &bp->link_info;
10056
10057         rc = bnxt_update_link(bp, true);
10058         if (rc) {
10059                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10060                            rc);
10061                 return rc;
10062         }
10063         if (!BNXT_SINGLE_PF(bp))
10064                 return 0;
10065
10066         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10067             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10068             link_info->req_flow_ctrl)
10069                 update_pause = true;
10070         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10071             link_info->force_pause_setting != link_info->req_flow_ctrl)
10072                 update_pause = true;
10073         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10074                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10075                         update_link = true;
10076                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10077                     link_info->req_link_speed != link_info->force_link_speed)
10078                         update_link = true;
10079                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10080                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10081                         update_link = true;
10082                 if (link_info->req_duplex != link_info->duplex_setting)
10083                         update_link = true;
10084         } else {
10085                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10086                         update_link = true;
10087                 if (link_info->advertising != link_info->auto_link_speeds ||
10088                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10089                         update_link = true;
10090         }
10091
10092         /* The last close may have shut down the link, so we need to call
10093          * PHY_CFG to bring it back up.
10094          */
10095         if (!bp->link_info.link_up)
10096                 update_link = true;
10097
10098         if (!bnxt_eee_config_ok(bp))
10099                 update_eee = true;
10100
10101         if (update_link)
10102                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10103         else if (update_pause)
10104                 rc = bnxt_hwrm_set_pause(bp);
10105         if (rc) {
10106                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10107                            rc);
10108                 return rc;
10109         }
10110
10111         return rc;
10112 }
10113
10114 /* Common routine to pre-map certain register block to different GRC window.
10115  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10116  * in the PF and 3 windows in the VF can be customized to map in different
10117  * register blocks.
10118  */
10119 static void bnxt_preset_reg_win(struct bnxt *bp)
10120 {
10121         if (BNXT_PF(bp)) {
10122                 /* CAG registers map to GRC window #4 */
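                /* Each window-select register is presumably 32 bits wide, with
                 * window #1 at the base, so window #4 sits at base + 12.
                 */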
10123                 writel(BNXT_CAG_REG_BASE,
10124                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10125         }
10126 }
10127
10128 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10129
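/* After an aborted firmware reset, try one more full firmware and
 * interrupt re-initialization so that a subsequent open can succeed.
 */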
10130 static int bnxt_reinit_after_abort(struct bnxt *bp)
10131 {
10132         int rc;
10133
10134         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10135                 return -EBUSY;
10136
10137         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10138                 return -ENODEV;
10139
10140         rc = bnxt_fw_init_one(bp);
10141         if (!rc) {
10142                 bnxt_clear_int_mode(bp);
10143                 rc = bnxt_init_int_mode(bp);
10144                 if (!rc) {
10145                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10146                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10147                 }
10148         }
10149         return rc;
10150 }
10151
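/* Bring the NIC up: reserve rings, allocate ring/context memory, set up
 * NAPI and IRQs (when irq_re_init), program the NIC, then enable NAPI,
 * interrupts and the TX queues.  Link settings are re-applied under
 * link_lock when link_re_init is set.
 */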
10152 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10153 {
10154         int rc = 0;
10155
10156         bnxt_preset_reg_win(bp);
10157         netif_carrier_off(bp->dev);
10158         if (irq_re_init) {
10159                 /* Reserve rings now if none were reserved at driver probe. */
10160                 rc = bnxt_init_dflt_ring_mode(bp);
10161                 if (rc) {
10162                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10163                         return rc;
10164                 }
10165         }
10166         rc = bnxt_reserve_rings(bp, irq_re_init);
10167         if (rc)
10168                 return rc;
10169         if ((bp->flags & BNXT_FLAG_RFS) &&
10170             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10171                 /* disable RFS if falling back to INTA */
10172                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10173                 bp->flags &= ~BNXT_FLAG_RFS;
10174         }
10175
10176         rc = bnxt_alloc_mem(bp, irq_re_init);
10177         if (rc) {
10178                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10179                 goto open_err_free_mem;
10180         }
10181
10182         if (irq_re_init) {
10183                 bnxt_init_napi(bp);
10184                 rc = bnxt_request_irq(bp);
10185                 if (rc) {
10186                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10187                         goto open_err_irq;
10188                 }
10189         }
10190
10191         rc = bnxt_init_nic(bp, irq_re_init);
10192         if (rc) {
10193                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10194                 goto open_err_irq;
10195         }
10196
10197         bnxt_enable_napi(bp);
10198         bnxt_debug_dev_init(bp);
10199
10200         if (link_re_init) {
10201                 mutex_lock(&bp->link_lock);
10202                 rc = bnxt_update_phy_setting(bp);
10203                 mutex_unlock(&bp->link_lock);
10204                 if (rc) {
10205                         netdev_warn(bp->dev, "failed to update phy settings\n");
10206                         if (BNXT_SINGLE_PF(bp)) {
10207                                 bp->link_info.phy_retry = true;
10208                                 bp->link_info.phy_retry_expires =
10209                                         jiffies + 5 * HZ;
10210                         }
10211                 }
10212         }
10213
10214         if (irq_re_init)
10215                 udp_tunnel_nic_reset_ntf(bp->dev);
10216
10217         set_bit(BNXT_STATE_OPEN, &bp->state);
10218         bnxt_enable_int(bp);
10219         /* Enable TX queues */
10220         bnxt_tx_enable(bp);
10221         mod_timer(&bp->timer, jiffies + bp->current_interval);
10222         /* Poll link status and check for SFP+ module status */
10223         mutex_lock(&bp->link_lock);
10224         bnxt_get_port_module_status(bp);
10225         mutex_unlock(&bp->link_lock);
10226
10227         /* VF-reps may need to be re-opened after the PF is re-opened */
10228         if (BNXT_PF(bp))
10229                 bnxt_vf_reps_open(bp);
10230         return 0;
10231
10232 open_err_irq:
10233         bnxt_del_napi(bp);
10234
10235 open_err_free_mem:
10236         bnxt_free_skbs(bp);
10237         bnxt_free_irq(bp);
10238         bnxt_free_mem(bp, true);
10239         return rc;
10240 }
10241
10242 /* rtnl_lock held */
10243 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10244 {
10245         int rc = 0;
10246
10247         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10248                 rc = -EIO;
10249         if (!rc)
10250                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10251         if (rc) {
10252                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10253                 dev_close(bp->dev);
10254         }
10255         return rc;
10256 }
10257
10258 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10259  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10260  * self-tests.
10261  */
10262 int bnxt_half_open_nic(struct bnxt *bp)
10263 {
10264         int rc = 0;
10265
10266         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10267                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10268                 rc = -ENODEV;
10269                 goto half_open_err;
10270         }
10271
10272         rc = bnxt_alloc_mem(bp, false);
10273         if (rc) {
10274                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10275                 goto half_open_err;
10276         }
10277         rc = bnxt_init_nic(bp, false);
10278         if (rc) {
10279                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10280                 goto half_open_err;
10281         }
10282         return 0;
10283
10284 half_open_err:
10285         bnxt_free_skbs(bp);
10286         bnxt_free_mem(bp, false);
10287         dev_close(bp->dev);
10288         return rc;
10289 }
10290
10291 /* rtnl_lock held, this call can only be made after a previous successful
10292  * call to bnxt_half_open_nic().
10293  */
10294 void bnxt_half_close_nic(struct bnxt *bp)
10295 {
10296         bnxt_hwrm_resource_free(bp, false, false);
10297         bnxt_free_skbs(bp);
10298         bnxt_free_mem(bp, false);
10299 }
10300
10301 static void bnxt_reenable_sriov(struct bnxt *bp)
10302 {
10303         if (BNXT_PF(bp)) {
10304                 struct bnxt_pf_info *pf = &bp->pf;
10305                 int n = pf->active_vfs;
10306
10307                 if (n)
10308                         bnxt_cfg_hw_sriov(bp, &n, true);
10309         }
10310 }
10311
10312 static int bnxt_open(struct net_device *dev)
10313 {
10314         struct bnxt *bp = netdev_priv(dev);
10315         int rc;
10316
10317         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10318                 rc = bnxt_reinit_after_abort(bp);
10319                 if (rc) {
10320                         if (rc == -EBUSY)
10321                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10322                         else
10323                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10324                         return -ENODEV;
10325                 }
10326         }
10327
10328         rc = bnxt_hwrm_if_change(bp, true);
10329         if (rc)
10330                 return rc;
10331
10332         rc = __bnxt_open_nic(bp, true, true);
10333         if (rc) {
10334                 bnxt_hwrm_if_change(bp, false);
10335         } else {
10336                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10337                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10338                                 bnxt_ulp_start(bp, 0);
10339                                 bnxt_reenable_sriov(bp);
10340                         }
10341                 }
10342                 bnxt_hwmon_open(bp);
10343         }
10344
10345         return rc;
10346 }
10347
10348 static bool bnxt_drv_busy(struct bnxt *bp)
10349 {
10350         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10351                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10352 }
10353
10354 static void bnxt_get_ring_stats(struct bnxt *bp,
10355                                 struct rtnl_link_stats64 *stats);
10356
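/* Tear the NIC down: stop TX, wait for in-flight stats readers and sp
 * tasks to finish, shut the rings down, disable NAPI, save the ring
 * counters into net_stats_prev, and free IRQs/memory as requested.
 */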
10357 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10358                              bool link_re_init)
10359 {
10360         /* Close the VF-reps before closing PF */
10361         if (BNXT_PF(bp))
10362                 bnxt_vf_reps_close(bp);
10363
10364         /* Change device state to avoid TX queue wake-ups */
10365         bnxt_tx_disable(bp);
10366
10367         clear_bit(BNXT_STATE_OPEN, &bp->state);
10368         smp_mb__after_atomic();
10369         while (bnxt_drv_busy(bp))
10370                 msleep(20);
10371
10372         /* Flush rings and disable interrupts */
10373         bnxt_shutdown_nic(bp, irq_re_init);
10374
10375         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10376
10377         bnxt_debug_dev_exit(bp);
10378         bnxt_disable_napi(bp);
10379         del_timer_sync(&bp->timer);
10380         bnxt_free_skbs(bp);
10381
10382         /* Save ring stats before shutdown */
10383         if (bp->bnapi && irq_re_init)
10384                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10385         if (irq_re_init) {
10386                 bnxt_free_irq(bp);
10387                 bnxt_del_napi(bp);
10388         }
10389         bnxt_free_mem(bp, irq_re_init);
10390 }
10391
10392 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10393 {
10394         int rc = 0;
10395
10396         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10397                 /* If we get here, it means firmware reset is in progress
10398                  * while we are trying to close.  We can safely proceed with
10399                  * the close because we are holding rtnl_lock().  Some firmware
10400                  * messages may fail as we proceed to close.  We set the
10401                  * ABORT_ERR flag here so that the FW reset thread will later
10402                  * abort when it gets the rtnl_lock() and sees the flag.
10403                  */
10404                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10405                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10406         }
10407
10408 #ifdef CONFIG_BNXT_SRIOV
10409         if (bp->sriov_cfg) {
10410                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10411                                                       !bp->sriov_cfg,
10412                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10413                 if (rc)
10414                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10415         }
10416 #endif
10417         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10418         return rc;
10419 }
10420
10421 static int bnxt_close(struct net_device *dev)
10422 {
10423         struct bnxt *bp = netdev_priv(dev);
10424
10425         bnxt_hwmon_close(bp);
10426         bnxt_close_nic(bp, true, true);
10427         bnxt_hwrm_shutdown_link(bp);
10428         bnxt_hwrm_if_change(bp, false);
10429         return 0;
10430 }
10431
10432 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10433                                    u16 *val)
10434 {
10435         struct hwrm_port_phy_mdio_read_output *resp;
10436         struct hwrm_port_phy_mdio_read_input *req;
10437         int rc;
10438
10439         if (bp->hwrm_spec_code < 0x10a00)
10440                 return -EOPNOTSUPP;
10441
10442         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10443         if (rc)
10444                 return rc;
10445
10446         req->port_id = cpu_to_le16(bp->pf.port_id);
10447         req->phy_addr = phy_addr;
10448         req->reg_addr = cpu_to_le16(reg & 0x1f);
10449         if (mdio_phy_id_is_c45(phy_addr)) {
10450                 req->cl45_mdio = 1;
10451                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10452                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10453                 req->reg_addr = cpu_to_le16(reg);
10454         }
10455
10456         resp = hwrm_req_hold(bp, req);
10457         rc = hwrm_req_send(bp, req);
10458         if (!rc)
10459                 *val = le16_to_cpu(resp->reg_data);
10460         hwrm_req_drop(bp, req);
10461         return rc;
10462 }
10463
10464 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10465                                     u16 val)
10466 {
10467         struct hwrm_port_phy_mdio_write_input *req;
10468         int rc;
10469
10470         if (bp->hwrm_spec_code < 0x10a00)
10471                 return -EOPNOTSUPP;
10472
10473         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10474         if (rc)
10475                 return rc;
10476
10477         req->port_id = cpu_to_le16(bp->pf.port_id);
10478         req->phy_addr = phy_addr;
10479         req->reg_addr = cpu_to_le16(reg & 0x1f);
10480         if (mdio_phy_id_is_c45(phy_addr)) {
10481                 req->cl45_mdio = 1;
10482                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10483                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10484                 req->reg_addr = cpu_to_le16(reg);
10485         }
10486         req->reg_data = cpu_to_le16(val);
10487
10488         return hwrm_req_send(bp, req);
10489 }
10490
10491 /* rtnl_lock held */
10492 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10493 {
10494         struct mii_ioctl_data *mdio = if_mii(ifr);
10495         struct bnxt *bp = netdev_priv(dev);
10496         int rc;
10497
10498         switch (cmd) {
10499         case SIOCGMIIPHY:
10500                 mdio->phy_id = bp->link_info.phy_addr;
10501
10502                 fallthrough;
10503         case SIOCGMIIREG: {
10504                 u16 mii_regval = 0;
10505
10506                 if (!netif_running(dev))
10507                         return -EAGAIN;
10508
10509                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10510                                              &mii_regval);
10511                 mdio->val_out = mii_regval;
10512                 return rc;
10513         }
10514
10515         case SIOCSMIIREG:
10516                 if (!netif_running(dev))
10517                         return -EAGAIN;
10518
10519                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10520                                                 mdio->val_in);
10521
10522         case SIOCSHWTSTAMP:
10523                 return bnxt_hwtstamp_set(dev, ifr);
10524
10525         case SIOCGHWTSTAMP:
10526                 return bnxt_hwtstamp_get(dev, ifr);
10527
10528         default:
10529                 /* do nothing */
10530                 break;
10531         }
10532         return -EOPNOTSUPP;
10533 }
10534
10535 static void bnxt_get_ring_stats(struct bnxt *bp,
10536                                 struct rtnl_link_stats64 *stats)
10537 {
10538         int i;
10539
10540         for (i = 0; i < bp->cp_nr_rings; i++) {
10541                 struct bnxt_napi *bnapi = bp->bnapi[i];
10542                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10543                 u64 *sw = cpr->stats.sw_stats;
10544
10545                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10546                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10547                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10548
10549                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10550                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10551                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10552
10553                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10554                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10555                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10556
10557                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10558                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10559                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10560
10561                 stats->rx_missed_errors +=
10562                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10563
10564                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10565
10566                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10567
10568                 stats->rx_dropped +=
10569                         cpr->sw_stats.rx.rx_netpoll_discards +
10570                         cpr->sw_stats.rx.rx_oom_discards;
10571         }
10572 }
10573
10574 static void bnxt_add_prev_stats(struct bnxt *bp,
10575                                 struct rtnl_link_stats64 *stats)
10576 {
10577         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10578
10579         stats->rx_packets += prev_stats->rx_packets;
10580         stats->tx_packets += prev_stats->tx_packets;
10581         stats->rx_bytes += prev_stats->rx_bytes;
10582         stats->tx_bytes += prev_stats->tx_bytes;
10583         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10584         stats->multicast += prev_stats->multicast;
10585         stats->rx_dropped += prev_stats->rx_dropped;
10586         stats->tx_dropped += prev_stats->tx_dropped;
10587 }
10588
10589 static void
10590 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10591 {
10592         struct bnxt *bp = netdev_priv(dev);
10593
10594         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10595         /* Make sure bnxt_close_nic() sees that we are reading stats before
10596          * we check the BNXT_STATE_OPEN flag.
10597          */
10598         smp_mb__after_atomic();
10599         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10600                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10601                 *stats = bp->net_stats_prev;
10602                 return;
10603         }
10604
10605         bnxt_get_ring_stats(bp, stats);
10606         bnxt_add_prev_stats(bp, stats);
10607
10608         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10609                 u64 *rx = bp->port_stats.sw_stats;
10610                 u64 *tx = bp->port_stats.sw_stats +
10611                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10612
10613                 stats->rx_crc_errors =
10614                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10615                 stats->rx_frame_errors =
10616                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10617                 stats->rx_length_errors =
10618                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10619                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10620                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10621                 stats->rx_errors =
10622                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10623                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10624                 stats->collisions =
10625                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10626                 stats->tx_fifo_errors =
10627                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10628                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10629         }
10630         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10631 }
10632
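/* Sync the netdev multicast list into vnic->mc_list.  If there are more
 * than BNXT_MAX_MC_ADDRS entries, fall back to ALL_MCAST instead.
 * Returns true when the cached list changed and the rx mask needs to be
 * re-sent to the firmware.
 */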
10633 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10634 {
10635         struct net_device *dev = bp->dev;
10636         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10637         struct netdev_hw_addr *ha;
10638         u8 *haddr;
10639         int mc_count = 0;
10640         bool update = false;
10641         int off = 0;
10642
10643         netdev_for_each_mc_addr(ha, dev) {
10644                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10645                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10646                         vnic->mc_list_count = 0;
10647                         return false;
10648                 }
10649                 haddr = ha->addr;
10650                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10651                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10652                         update = true;
10653                 }
10654                 off += ETH_ALEN;
10655                 mc_count++;
10656         }
10657         if (mc_count)
10658                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10659
10660         if (mc_count != vnic->mc_list_count) {
10661                 vnic->mc_list_count = mc_count;
10662                 update = true;
10663         }
10664         return update;
10665 }
10666
10667 static bool bnxt_uc_list_updated(struct bnxt *bp)
10668 {
10669         struct net_device *dev = bp->dev;
10670         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10671         struct netdev_hw_addr *ha;
10672         int off = 0;
10673
10674         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10675                 return true;
10676
10677         netdev_for_each_uc_addr(ha, dev) {
10678                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10679                         return true;
10680
10681                 off += ETH_ALEN;
10682         }
10683         return false;
10684 }
10685
10686 static void bnxt_set_rx_mode(struct net_device *dev)
10687 {
10688         struct bnxt *bp = netdev_priv(dev);
10689         struct bnxt_vnic_info *vnic;
10690         bool mc_update = false;
10691         bool uc_update;
10692         u32 mask;
10693
10694         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10695                 return;
10696
10697         vnic = &bp->vnic_info[0];
10698         mask = vnic->rx_mask;
10699         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10700                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10701                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10702                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10703
10704         if (dev->flags & IFF_PROMISC)
10705                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10706
10707         uc_update = bnxt_uc_list_updated(bp);
10708
10709         if (dev->flags & IFF_BROADCAST)
10710                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10711         if (dev->flags & IFF_ALLMULTI) {
10712                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10713                 vnic->mc_list_count = 0;
10714         } else {
10715                 mc_update = bnxt_mc_list_updated(bp, &mask);
10716         }
10717
10718         if (mask != vnic->rx_mask || uc_update || mc_update) {
10719                 vnic->rx_mask = mask;
10720
10721                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10722                 bnxt_queue_sp_work(bp);
10723         }
10724 }
10725
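/* Re-program the unicast MAC filters: free the old L2 filters, rebuild
 * vnic->uc_list from the netdev UC address list (falling back to
 * promiscuous mode when it exceeds BNXT_MAX_UC_ADDRS), then push the
 * resulting rx_mask to the firmware.
 */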
10726 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10727 {
10728         struct net_device *dev = bp->dev;
10729         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10730         struct hwrm_cfa_l2_filter_free_input *req;
10731         struct netdev_hw_addr *ha;
10732         int i, off = 0, rc;
10733         bool uc_update;
10734
10735         netif_addr_lock_bh(dev);
10736         uc_update = bnxt_uc_list_updated(bp);
10737         netif_addr_unlock_bh(dev);
10738
10739         if (!uc_update)
10740                 goto skip_uc;
10741
10742         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10743         if (rc)
10744                 return rc;
10745         hwrm_req_hold(bp, req);
10746         for (i = 1; i < vnic->uc_filter_count; i++) {
10747                 req->l2_filter_id = vnic->fw_l2_filter_id[i];
10748
10749                 rc = hwrm_req_send(bp, req);
10750         }
10751         hwrm_req_drop(bp, req);
10752
10753         vnic->uc_filter_count = 1;
10754
10755         netif_addr_lock_bh(dev);
10756         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10757                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10758         } else {
10759                 netdev_for_each_uc_addr(ha, dev) {
10760                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10761                         off += ETH_ALEN;
10762                         vnic->uc_filter_count++;
10763                 }
10764         }
10765         netif_addr_unlock_bh(dev);
10766
10767         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10768                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10769                 if (rc) {
10770                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10771                                    rc);
10772                         vnic->uc_filter_count = i;
10773                         return rc;
10774                 }
10775         }
10776
10777 skip_uc:
10778         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10779             !bnxt_promisc_ok(bp))
10780                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10781         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10782         if (rc && vnic->mc_list_count) {
10783                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10784                             rc);
10785                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10786                 vnic->mc_list_count = 0;
10787                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10788         }
10789         if (rc)
10790                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10791                            rc);
10792
10793         return rc;
10794 }
10795
10796 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10797 {
10798 #ifdef CONFIG_BNXT_SRIOV
10799         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10800                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10801
10802                 /* If no minimum rings were provisioned by the PF, don't
10803                  * reserve rings by default while the device is down.
10804                  */
10805                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10806                         return true;
10807
10808                 if (!netif_running(bp->dev))
10809                         return false;
10810         }
10811 #endif
10812         return true;
10813 }
10814
10815 /* If the chip and firmware support RFS */
10816 static bool bnxt_rfs_supported(struct bnxt *bp)
10817 {
10818         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10819                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10820                         return true;
10821                 return false;
10822         }
10823         /* 212 firmware is broken for aRFS */
10824         if (BNXT_FW_MAJ(bp) == 212)
10825                 return false;
10826         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10827                 return true;
10828         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10829                 return true;
10830         return false;
10831 }
10832
10833 /* If runtime conditions support RFS */
10834 static bool bnxt_rfs_capable(struct bnxt *bp)
10835 {
10836 #ifdef CONFIG_RFS_ACCEL
10837         int vnics, max_vnics, max_rss_ctxs;
10838
10839         if (bp->flags & BNXT_FLAG_CHIP_P5)
10840                 return bnxt_rfs_supported(bp);
10841         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10842                 return false;
10843
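        /* aRFS needs one VNIC per RX ring in addition to the default VNIC */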
10844         vnics = 1 + bp->rx_nr_rings;
10845         max_vnics = bnxt_get_max_func_vnics(bp);
10846         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10847
10848         /* RSS contexts not a limiting factor */
10849         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10850                 max_rss_ctxs = max_vnics;
10851         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10852                 if (bp->rx_nr_rings > 1)
10853                         netdev_warn(bp->dev,
10854                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10855                                     min(max_rss_ctxs - 1, max_vnics - 1));
10856                 return false;
10857         }
10858
10859         if (!BNXT_NEW_RM(bp))
10860                 return true;
10861
10862         if (vnics == bp->hw_resc.resv_vnics)
10863                 return true;
10864
10865         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10866         if (vnics <= bp->hw_resc.resv_vnics)
10867                 return true;
10868
10869         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10870         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10871         return false;
10872 #else
10873         return false;
10874 #endif
10875 }
10876
10877 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10878                                            netdev_features_t features)
10879 {
10880         struct bnxt *bp = netdev_priv(dev);
10881         netdev_features_t vlan_features;
10882
10883         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10884                 features &= ~NETIF_F_NTUPLE;
10885
10886         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10887                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10888
10889         if (!(features & NETIF_F_GRO))
10890                 features &= ~NETIF_F_GRO_HW;
10891
10892         if (features & NETIF_F_GRO_HW)
10893                 features &= ~NETIF_F_LRO;
10894
10895         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10896          * turned on or off together.
10897          */
10898         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10899         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10900                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10901                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10902                 else if (vlan_features)
10903                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10904         }
10905 #ifdef CONFIG_BNXT_SRIOV
10906         if (BNXT_VF(bp) && bp->vf.vlan)
10907                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10908 #endif
10909         return features;
10910 }
10911
10912 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10913 {
10914         struct bnxt *bp = netdev_priv(dev);
10915         u32 flags = bp->flags;
10916         u32 changes;
10917         int rc = 0;
10918         bool re_init = false;
10919         bool update_tpa = false;
10920
10921         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10922         if (features & NETIF_F_GRO_HW)
10923                 flags |= BNXT_FLAG_GRO;
10924         else if (features & NETIF_F_LRO)
10925                 flags |= BNXT_FLAG_LRO;
10926
10927         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10928                 flags &= ~BNXT_FLAG_TPA;
10929
10930         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10931                 flags |= BNXT_FLAG_STRIP_VLAN;
10932
10933         if (features & NETIF_F_NTUPLE)
10934                 flags |= BNXT_FLAG_RFS;
10935
10936         changes = flags ^ bp->flags;
10937         if (changes & BNXT_FLAG_TPA) {
10938                 update_tpa = true;
10939                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10940                     (flags & BNXT_FLAG_TPA) == 0 ||
10941                     (bp->flags & BNXT_FLAG_CHIP_P5))
10942                         re_init = true;
10943         }
10944
10945         if (changes & ~BNXT_FLAG_TPA)
10946                 re_init = true;
10947
10948         if (flags != bp->flags) {
10949                 u32 old_flags = bp->flags;
10950
10951                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10952                         bp->flags = flags;
10953                         if (update_tpa)
10954                                 bnxt_set_ring_params(bp);
10955                         return rc;
10956                 }
10957
10958                 if (re_init) {
10959                         bnxt_close_nic(bp, false, false);
10960                         bp->flags = flags;
10961                         if (update_tpa)
10962                                 bnxt_set_ring_params(bp);
10963
10964                         return bnxt_open_nic(bp, false, false);
10965                 }
10966                 if (update_tpa) {
10967                         bp->flags = flags;
10968                         rc = bnxt_set_tpa(bp,
10969                                           (flags & BNXT_FLAG_TPA) ?
10970                                           true : false);
10971                         if (rc)
10972                                 bp->flags = old_flags;
10973                 }
10974         }
10975         return rc;
10976 }
10977
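/* Walk the IPv6 extension header chain to decide whether the hardware
 * can still offload this packet; bnxt_features_check() drops checksum
 * and GSO offload when this returns false.
 */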
10978 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10979                               u8 **nextp)
10980 {
10981         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10982         int hdr_count = 0;
10983         u8 *nexthdr;
10984         int start;
10985
10986         /* Check that there are at most 2 IPv6 extension headers, no
10987          * fragment header, and each is <= 64 bytes.
10988          */
10989         start = nw_off + sizeof(*ip6h);
10990         nexthdr = &ip6h->nexthdr;
10991         while (ipv6_ext_hdr(*nexthdr)) {
10992                 struct ipv6_opt_hdr *hp;
10993                 int hdrlen;
10994
10995                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10996                     *nexthdr == NEXTHDR_FRAGMENT)
10997                         return false;
10998                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10999                                           skb_headlen(skb), NULL);
11000                 if (!hp)
11001                         return false;
11002                 if (*nexthdr == NEXTHDR_AUTH)
11003                         hdrlen = ipv6_authlen(hp);
11004                 else
11005                         hdrlen = ipv6_optlen(hp);
11006
11007                 if (hdrlen > 64)
11008                         return false;
11009                 nexthdr = &hp->nexthdr;
11010                 start += hdrlen;
11011                 hdr_count++;
11012         }
11013         if (nextp) {
11014                 /* Caller will check inner protocol */
11015                 if (skb->encapsulation) {
11016                         *nextp = nexthdr;
11017                         return true;
11018                 }
11019                 *nextp = NULL;
11020         }
11021         /* Only support TCP/UDP for non-tunneled IPv6 and inner IPv6 */
11022         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11023 }
11024
11025 /* For UDP, we can only handle 1 VXLAN port and 1 Geneve port. */
11026 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11027 {
11028         struct udphdr *uh = udp_hdr(skb);
11029         __be16 udp_port = uh->dest;
11030
11031         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11032                 return false;
11033         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11034                 struct ethhdr *eh = inner_eth_hdr(skb);
11035
11036                 switch (eh->h_proto) {
11037                 case htons(ETH_P_IP):
11038                         return true;
11039                 case htons(ETH_P_IPV6):
11040                         return bnxt_exthdr_check(bp, skb,
11041                                                  skb_inner_network_offset(skb),
11042                                                  NULL);
11043                 }
11044         }
11045         return false;
11046 }
11047
11048 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11049 {
11050         switch (l4_proto) {
11051         case IPPROTO_UDP:
11052                 return bnxt_udp_tunl_check(bp, skb);
11053         case IPPROTO_IPIP:
11054                 return true;
11055         case IPPROTO_GRE: {
11056                 switch (skb->inner_protocol) {
11057                 default:
11058                         return false;
11059                 case htons(ETH_P_IP):
11060                         return true;
11061                 case htons(ETH_P_IPV6):
11062                         fallthrough;
11063                 }
11064         }
11065         case IPPROTO_IPV6:
11066                 /* Check ext headers of inner ipv6 */
11067                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11068                                          NULL);
11069         }
11070         return false;
11071 }
11072
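/* Keep checksum/GSO offload only for encapsulations the NIC can parse:
 * VXLAN/Geneve on the configured UDP ports, GRE, IP-in-IP, and IPv6
 * with a short, well-formed extension header chain.  Everything else
 * falls back to software.
 */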
11073 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11074                                              struct net_device *dev,
11075                                              netdev_features_t features)
11076 {
11077         struct bnxt *bp = netdev_priv(dev);
11078         u8 *l4_proto;
11079
11080         features = vlan_features_check(skb, features);
11081         switch (vlan_get_protocol(skb)) {
11082         case htons(ETH_P_IP):
11083                 if (!skb->encapsulation)
11084                         return features;
11085                 l4_proto = &ip_hdr(skb)->protocol;
11086                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11087                         return features;
11088                 break;
11089         case htons(ETH_P_IPV6):
11090                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11091                                        &l4_proto))
11092                         break;
11093                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11094                         return features;
11095                 break;
11096         }
11097         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11098 }
11099
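/* Read num_words 32-bit words at reg_off (within the ChiMP register
 * view) via HWRM_DBG_READ_DIRECT.  The firmware DMAs the data into a
 * host buffer, which is then converted from little-endian into reg_buf.
 */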
11100 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11101                          u32 *reg_buf)
11102 {
11103         struct hwrm_dbg_read_direct_output *resp;
11104         struct hwrm_dbg_read_direct_input *req;
11105         __le32 *dbg_reg_buf;
11106         dma_addr_t mapping;
11107         int rc, i;
11108
11109         rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11110         if (rc)
11111                 return rc;
11112
11113         dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11114                                          &mapping);
11115         if (!dbg_reg_buf) {
11116                 rc = -ENOMEM;
11117                 goto dbg_rd_reg_exit;
11118         }
11119
11120         req->host_dest_addr = cpu_to_le64(mapping);
11121
11122         resp = hwrm_req_hold(bp, req);
11123         req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11124         req->read_len32 = cpu_to_le32(num_words);
11125
11126         rc = hwrm_req_send(bp, req);
11127         if (rc || resp->error_code) {
11128                 rc = -EIO;
11129                 goto dbg_rd_reg_exit;
11130         }
11131         for (i = 0; i < num_words; i++)
11132                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11133
11134 dbg_rd_reg_exit:
11135         hwrm_req_drop(bp, req);
11136         return rc;
11137 }
11138
11139 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11140                                        u32 ring_id, u32 *prod, u32 *cons)
11141 {
11142         struct hwrm_dbg_ring_info_get_output *resp;
11143         struct hwrm_dbg_ring_info_get_input *req;
11144         int rc;
11145
11146         rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11147         if (rc)
11148                 return rc;
11149
11150         req->ring_type = ring_type;
11151         req->fw_ring_id = cpu_to_le32(ring_id);
11152         resp = hwrm_req_hold(bp, req);
11153         rc = hwrm_req_send(bp, req);
11154         if (!rc) {
11155                 *prod = le32_to_cpu(resp->producer_index);
11156                 *cons = le32_to_cpu(resp->consumer_index);
11157         }
11158         hwrm_req_drop(bp, req);
11159         return rc;
11160 }
11161
11162 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11163 {
11164         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11165         int i = bnapi->index;
11166
11167         if (!txr)
11168                 return;
11169
11170         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11171                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11172                     txr->tx_cons);
11173 }
11174
11175 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11176 {
11177         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11178         int i = bnapi->index;
11179
11180         if (!rxr)
11181                 return;
11182
11183         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11184                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11185                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11186                     rxr->rx_sw_agg_prod);
11187 }
11188
11189 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11190 {
11191         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11192         int i = bnapi->index;
11193
11194         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11195                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11196 }
11197
11198 static void bnxt_dbg_dump_states(struct bnxt *bp)
11199 {
11200         int i;
11201         struct bnxt_napi *bnapi;
11202
11203         for (i = 0; i < bp->cp_nr_rings; i++) {
11204                 bnapi = bp->bnapi[i];
11205                 if (netif_msg_drv(bp)) {
11206                         bnxt_dump_tx_sw_state(bnapi);
11207                         bnxt_dump_rx_sw_state(bnapi);
11208                         bnxt_dump_cp_sw_state(bnapi);
11209                 }
11210         }
11211 }
11212
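      /* Ask the firmware to reset a single RX ring group.  The caller falls
       * back to a full reset if the firmware does not support this.
       */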
11213 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11214 {
11215         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11216         struct hwrm_ring_reset_input *req;
11217         struct bnxt_napi *bnapi = rxr->bnapi;
11218         struct bnxt_cp_ring_info *cpr;
11219         u16 cp_ring_id;
11220         int rc;
11221
11222         rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11223         if (rc)
11224                 return rc;
11225
11226         cpr = &bnapi->cp_ring;
11227         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11228         req->cmpl_ring = cpu_to_le16(cp_ring_id);
11229         req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11230         req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11231         return hwrm_req_send_silent(bp, req);
11232 }
11233
11234 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11235 {
11236         if (!silent)
11237                 bnxt_dbg_dump_states(bp);
11238         if (netif_running(bp->dev)) {
11239                 int rc;
11240
11241                 if (silent) {
11242                         bnxt_close_nic(bp, false, false);
11243                         bnxt_open_nic(bp, false, false);
11244                 } else {
11245                         bnxt_ulp_stop(bp);
11246                         bnxt_close_nic(bp, true, false);
11247                         rc = bnxt_open_nic(bp, true, false);
11248                         bnxt_ulp_start(bp, rc);
11249                 }
11250         }
11251 }
11252
11253 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11254 {
11255         struct bnxt *bp = netdev_priv(dev);
11256
11257         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11258         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11259         bnxt_queue_sp_work(bp);
11260 }
11261
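      /* Called from bnxt_timer().  Every tmr_multiplier ticks, verify that the
       * firmware heartbeat register is still advancing and that the reset
       * counter has not changed; otherwise schedule firmware exception
       * handling via BNXT_FW_EXCEPTION_SP_EVENT.
       */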
11262 static void bnxt_fw_health_check(struct bnxt *bp)
11263 {
11264         struct bnxt_fw_health *fw_health = bp->fw_health;
11265         u32 val;
11266
11267         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11268                 return;
11269
11270         /* Make sure it is enabled before checking the tmr_counter. */
11271         smp_rmb();
11272         if (fw_health->tmr_counter) {
11273                 fw_health->tmr_counter--;
11274                 return;
11275         }
11276
11277         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11278         if (val == fw_health->last_fw_heartbeat)
11279                 goto fw_reset;
11280
11281         fw_health->last_fw_heartbeat = val;
11282
11283         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11284         if (val != fw_health->last_fw_reset_cnt)
11285                 goto fw_reset;
11286
11287         fw_health->tmr_counter = fw_health->tmr_multiplier;
11288         return;
11289
11290 fw_reset:
11291         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11292         bnxt_queue_sp_work(bp);
11293 }
11294
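      /* Periodic driver timer.  Schedules slow-path work (statistics refresh,
       * PHY retry, flow stats, firmware health check, etc.) and re-arms itself
       * every bp->current_interval jiffies while the device is open.
       */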
11295 static void bnxt_timer(struct timer_list *t)
11296 {
11297         struct bnxt *bp = from_timer(bp, t, timer);
11298         struct net_device *dev = bp->dev;
11299
11300         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11301                 return;
11302
11303         if (atomic_read(&bp->intr_sem) != 0)
11304                 goto bnxt_restart_timer;
11305
11306         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11307                 bnxt_fw_health_check(bp);
11308
11309         if (bp->link_info.link_up && bp->stats_coal_ticks) {
11310                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11311                 bnxt_queue_sp_work(bp);
11312         }
11313
11314         if (bnxt_tc_flower_enabled(bp)) {
11315                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11316                 bnxt_queue_sp_work(bp);
11317         }
11318
11319 #ifdef CONFIG_RFS_ACCEL
11320         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11321                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11322                 bnxt_queue_sp_work(bp);
11323         }
11324 #endif /*CONFIG_RFS_ACCEL*/
11325
11326         if (bp->link_info.phy_retry) {
11327                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11328                         bp->link_info.phy_retry = false;
11329                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11330                 } else {
11331                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11332                         bnxt_queue_sp_work(bp);
11333                 }
11334         }
11335
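              /* On P5 chips with chip_rev 0, periodically check for missed
               * completion ring interrupts (see bnxt_chk_missed_irq()).
               */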
11336         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11337             netif_carrier_ok(dev)) {
11338                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11339                 bnxt_queue_sp_work(bp);
11340         }
11341 bnxt_restart_timer:
11342         mod_timer(&bp->timer, jiffies + bp->current_interval);
11343 }
11344
11345 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11346 {
11347         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11348          * set.  If the device is being closed, bnxt_close() may be holding
11349          * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
11350          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
11351          */
11352         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11353         rtnl_lock();
11354 }
11355
11356 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11357 {
11358         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11359         rtnl_unlock();
11360 }
11361
11362 /* Only called from bnxt_sp_task() */
11363 static void bnxt_reset(struct bnxt *bp, bool silent)
11364 {
11365         bnxt_rtnl_lock_sp(bp);
11366         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11367                 bnxt_reset_task(bp, silent);
11368         bnxt_rtnl_unlock_sp(bp);
11369 }
11370
11371 /* Only called from bnxt_sp_task() */
11372 static void bnxt_rx_ring_reset(struct bnxt *bp)
11373 {
11374         int i;
11375
11376         bnxt_rtnl_lock_sp(bp);
11377         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11378                 bnxt_rtnl_unlock_sp(bp);
11379                 return;
11380         }
11381         /* Disable and flush TPA before resetting the RX ring */
11382         if (bp->flags & BNXT_FLAG_TPA)
11383                 bnxt_set_tpa(bp, false);
11384         for (i = 0; i < bp->rx_nr_rings; i++) {
11385                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11386                 struct bnxt_cp_ring_info *cpr;
11387                 int rc;
11388
11389                 if (!rxr->bnapi->in_reset)
11390                         continue;
11391
11392                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11393                 if (rc) {
11394                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11395                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11396                         else
11397                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11398                                             rc);
11399                         bnxt_reset_task(bp, true);
11400                         break;
11401                 }
11402                 bnxt_free_one_rx_ring_skbs(bp, i);
11403                 rxr->rx_prod = 0;
11404                 rxr->rx_agg_prod = 0;
11405                 rxr->rx_sw_agg_prod = 0;
11406                 rxr->rx_next_cons = 0;
11407                 rxr->bnapi->in_reset = false;
11408                 bnxt_alloc_one_rx_ring(bp, i);
11409                 cpr = &rxr->bnapi->cp_ring;
11410                 cpr->sw_stats.rx.rx_resets++;
11411                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11412                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11413                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11414         }
11415         if (bp->flags & BNXT_FLAG_TPA)
11416                 bnxt_set_tpa(bp, true);
11417         bnxt_rtnl_unlock_sp(bp);
11418 }
11419
11420 static void bnxt_fw_reset_close(struct bnxt *bp)
11421 {
11422         bnxt_ulp_stop(bp);
11423         /* When firmware is in fatal state, quiesce device and disable
11424          * bus master to prevent any potential bad DMAs before freeing
11425          * kernel memory.
11426          */
11427         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11428                 u16 val = 0;
11429
11430                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11431                 if (val == 0xffff)
11432                         bp->fw_reset_min_dsecs = 0;
11433                 bnxt_tx_disable(bp);
11434                 bnxt_disable_napi(bp);
11435                 bnxt_disable_int_sync(bp);
11436                 bnxt_free_irq(bp);
11437                 bnxt_clear_int_mode(bp);
11438                 pci_disable_device(bp->pdev);
11439         }
11440         __bnxt_close_nic(bp, true, false);
11441         bnxt_vf_reps_free(bp);
11442         bnxt_clear_int_mode(bp);
11443         bnxt_hwrm_func_drv_unrgtr(bp);
11444         if (pci_is_enabled(bp->pdev))
11445                 pci_disable_device(bp->pdev);
11446         bnxt_free_ctx_mem(bp);
11447         kfree(bp->ctx);
11448         bp->ctx = NULL;
11449 }
11450
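      /* After a firmware exception, the firmware is considered OK only if its
       * heartbeat register is ticking again and the reset counter has changed,
       * i.e. the firmware has completed a self-reset.
       */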
11451 static bool is_bnxt_fw_ok(struct bnxt *bp)
11452 {
11453         struct bnxt_fw_health *fw_health = bp->fw_health;
11454         bool no_heartbeat = false, has_reset = false;
11455         u32 val;
11456
11457         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11458         if (val == fw_health->last_fw_heartbeat)
11459                 no_heartbeat = true;
11460
11461         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11462         if (val != fw_health->last_fw_reset_cnt)
11463                 has_reset = true;
11464
11465         if (!no_heartbeat && has_reset)
11466                 return true;
11467
11468         return false;
11469 }
11470
11471 /* rtnl_lock is acquired before calling this function */
11472 static void bnxt_force_fw_reset(struct bnxt *bp)
11473 {
11474         struct bnxt_fw_health *fw_health = bp->fw_health;
11475         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11476         u32 wait_dsecs;
11477
11478         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11479             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11480                 return;
11481
11482         if (ptp) {
11483                 spin_lock_bh(&ptp->ptp_lock);
11484                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11485                 spin_unlock_bh(&ptp->ptp_lock);
11486         } else {
11487                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11488         }
11489         bnxt_fw_reset_close(bp);
11490         wait_dsecs = fw_health->master_func_wait_dsecs;
11491         if (fw_health->master) {
11492                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11493                         wait_dsecs = 0;
11494                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11495         } else {
11496                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11497                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11498                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11499         }
11500
11501         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11502         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11503         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11504 }
11505
11506 void bnxt_fw_exception(struct bnxt *bp)
11507 {
11508         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11509         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11510         bnxt_rtnl_lock_sp(bp);
11511         bnxt_force_fw_reset(bp);
11512         bnxt_rtnl_unlock_sp(bp);
11513 }
11514
11515 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11516  * < 0 on error.
11517  */
11518 static int bnxt_get_registered_vfs(struct bnxt *bp)
11519 {
11520 #ifdef CONFIG_BNXT_SRIOV
11521         int rc;
11522
11523         if (!BNXT_PF(bp))
11524                 return 0;
11525
11526         rc = bnxt_hwrm_func_qcfg(bp);
11527         if (rc) {
11528                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11529                 return rc;
11530         }
11531         if (bp->pf.registered_vfs)
11532                 return bp->pf.registered_vfs;
11533         if (bp->sriov_cfg)
11534                 return 1;
11535 #endif
11536         return 0;
11537 }
11538
11539 void bnxt_fw_reset(struct bnxt *bp)
11540 {
11541         bnxt_rtnl_lock_sp(bp);
11542         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11543             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11544                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11545                 int n = 0, tmo;
11546
11547                 if (ptp) {
11548                         spin_lock_bh(&ptp->ptp_lock);
11549                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11550                         spin_unlock_bh(&ptp->ptp_lock);
11551                 } else {
11552                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11553                 }
11554                 if (bp->pf.active_vfs &&
11555                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11556                         n = bnxt_get_registered_vfs(bp);
11557                 if (n < 0) {
11558                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11559                                    n);
11560                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11561                         dev_close(bp->dev);
11562                         goto fw_reset_exit;
11563                 } else if (n > 0) {
11564                         u16 vf_tmo_dsecs = n * 10;
11565
11566                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11567                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11568                         bp->fw_reset_state =
11569                                 BNXT_FW_RESET_STATE_POLL_VF;
11570                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11571                         goto fw_reset_exit;
11572                 }
11573                 bnxt_fw_reset_close(bp);
11574                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11575                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11576                         tmo = HZ / 10;
11577                 } else {
11578                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11579                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11580                 }
11581                 bnxt_queue_fw_reset_work(bp, tmo);
11582         }
11583 fw_reset_exit:
11584         bnxt_rtnl_unlock_sp(bp);
11585 }
11586
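      /* P5 chips only: for each completion ring that has pending work but
       * whose consumer index has not moved since the last check, read the ring
       * state from the firmware (for debugging) and count a missed interrupt.
       */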
11587 static void bnxt_chk_missed_irq(struct bnxt *bp)
11588 {
11589         int i;
11590
11591         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11592                 return;
11593
11594         for (i = 0; i < bp->cp_nr_rings; i++) {
11595                 struct bnxt_napi *bnapi = bp->bnapi[i];
11596                 struct bnxt_cp_ring_info *cpr;
11597                 u32 fw_ring_id;
11598                 int j;
11599
11600                 if (!bnapi)
11601                         continue;
11602
11603                 cpr = &bnapi->cp_ring;
11604                 for (j = 0; j < 2; j++) {
11605                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11606                         u32 val[2];
11607
11608                         if (!cpr2 || cpr2->has_more_work ||
11609                             !bnxt_has_work(bp, cpr2))
11610                                 continue;
11611
11612                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11613                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11614                                 continue;
11615                         }
11616                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11617                         bnxt_dbg_hwrm_ring_info_get(bp,
11618                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11619                                 fw_ring_id, &val[0], &val[1]);
11620                         cpr->sw_stats.cmn.missed_irqs++;
11621                 }
11622         }
11623 }
11624
11625 static void bnxt_cfg_ntp_filters(struct bnxt *);
11626
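      /* Derive the requested link settings (autoneg, speed, signal mode and
       * flow control) from the PHY configuration reported by the firmware.
       */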
11627 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11628 {
11629         struct bnxt_link_info *link_info = &bp->link_info;
11630
11631         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11632                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11633                 if (bp->hwrm_spec_code >= 0x10201) {
11634                         if (link_info->auto_pause_setting &
11635                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11636                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11637                 } else {
11638                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11639                 }
11640                 link_info->advertising = link_info->auto_link_speeds;
11641                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11642         } else {
11643                 link_info->req_link_speed = link_info->force_link_speed;
11644                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11645                 if (link_info->force_pam4_link_speed) {
11646                         link_info->req_link_speed =
11647                                 link_info->force_pam4_link_speed;
11648                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11649                 }
11650                 link_info->req_duplex = link_info->duplex_setting;
11651         }
11652         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11653                 link_info->req_flow_ctrl =
11654                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11655         else
11656                 link_info->req_flow_ctrl = link_info->force_pause_setting;
11657 }
11658
11659 static void bnxt_fw_echo_reply(struct bnxt *bp)
11660 {
11661         struct bnxt_fw_health *fw_health = bp->fw_health;
11662         struct hwrm_func_echo_response_input *req;
11663         int rc;
11664
11665         rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11666         if (rc)
11667                 return;
11668         req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11669         req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11670         hwrm_req_send(bp, req);
11671 }
11672
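      /* Slow-path workqueue handler.  Services the sp_event bits set from
       * timer and interrupt context while the device is open; the reset
       * handlers at the end temporarily drop BNXT_STATE_IN_SP_TASK (see
       * bnxt_rtnl_lock_sp()).
       */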
11673 static void bnxt_sp_task(struct work_struct *work)
11674 {
11675         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11676
11677         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11678         smp_mb__after_atomic();
11679         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11680                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11681                 return;
11682         }
11683
11684         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11685                 bnxt_cfg_rx_mode(bp);
11686
11687         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11688                 bnxt_cfg_ntp_filters(bp);
11689         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11690                 bnxt_hwrm_exec_fwd_req(bp);
11691         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11692                 bnxt_hwrm_port_qstats(bp, 0);
11693                 bnxt_hwrm_port_qstats_ext(bp, 0);
11694                 bnxt_accumulate_all_stats(bp);
11695         }
11696
11697         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11698                 int rc;
11699
11700                 mutex_lock(&bp->link_lock);
11701                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11702                                        &bp->sp_event))
11703                         bnxt_hwrm_phy_qcaps(bp);
11704
11705                 rc = bnxt_update_link(bp, true);
11706                 if (rc)
11707                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11708                                    rc);
11709
11710                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11711                                        &bp->sp_event))
11712                         bnxt_init_ethtool_link_settings(bp);
11713                 mutex_unlock(&bp->link_lock);
11714         }
11715         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11716                 int rc;
11717
11718                 mutex_lock(&bp->link_lock);
11719                 rc = bnxt_update_phy_setting(bp);
11720                 mutex_unlock(&bp->link_lock);
11721                 if (rc) {
11722                         netdev_warn(bp->dev, "update phy settings retry failed\n");
11723                 } else {
11724                         bp->link_info.phy_retry = false;
11725                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
11726                 }
11727         }
11728         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11729                 mutex_lock(&bp->link_lock);
11730                 bnxt_get_port_module_status(bp);
11731                 mutex_unlock(&bp->link_lock);
11732         }
11733
11734         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11735                 bnxt_tc_flow_stats_work(bp);
11736
11737         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11738                 bnxt_chk_missed_irq(bp);
11739
11740         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11741                 bnxt_fw_echo_reply(bp);
11742
11743         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
11744          * must be the last functions to be called before exiting.
11745          */
11746         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11747                 bnxt_reset(bp, false);
11748
11749         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11750                 bnxt_reset(bp, true);
11751
11752         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11753                 bnxt_rx_ring_reset(bp);
11754
11755         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11756                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11757
11758         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11759                 if (!is_bnxt_fw_ok(bp))
11760                         bnxt_devlink_health_report(bp,
11761                                                    BNXT_FW_EXCEPTION_SP_EVENT);
11762         }
11763
11764         smp_mb__before_atomic();
11765         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11766 }
11767
11768 /* Under rtnl_lock */
11769 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11770                      int tx_xdp)
11771 {
11772         int max_rx, max_tx, tx_sets = 1;
11773         int tx_rings_needed, stats;
11774         int rx_rings = rx;
11775         int cp, vnics, rc;
11776
11777         if (tcs)
11778                 tx_sets = tcs;
11779
11780         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11781         if (rc)
11782                 return rc;
11783
11784         if (max_rx < rx)
11785                 return -ENOMEM;
11786
11787         tx_rings_needed = tx * tx_sets + tx_xdp;
11788         if (max_tx < tx_rings_needed)
11789                 return -ENOMEM;
11790
11791         vnics = 1;
11792         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11793                 vnics += rx_rings;
11794
11795         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11796                 rx_rings <<= 1;
11797         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11798         stats = cp;
11799         if (BNXT_NEW_RM(bp)) {
11800                 cp += bnxt_get_ulp_msix_num(bp);
11801                 stats += bnxt_get_ulp_stat_ctxs(bp);
11802         }
11803         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11804                                      stats, vnics);
11805 }
11806
11807 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11808 {
11809         if (bp->bar2) {
11810                 pci_iounmap(pdev, bp->bar2);
11811                 bp->bar2 = NULL;
11812         }
11813
11814         if (bp->bar1) {
11815                 pci_iounmap(pdev, bp->bar1);
11816                 bp->bar1 = NULL;
11817         }
11818
11819         if (bp->bar0) {
11820                 pci_iounmap(pdev, bp->bar0);
11821                 bp->bar0 = NULL;
11822         }
11823 }
11824
11825 static void bnxt_cleanup_pci(struct bnxt *bp)
11826 {
11827         bnxt_unmap_bars(bp, bp->pdev);
11828         pci_release_regions(bp->pdev);
11829         if (pci_is_enabled(bp->pdev))
11830                 pci_disable_device(bp->pdev);
11831 }
11832
11833 static void bnxt_init_dflt_coal(struct bnxt *bp)
11834 {
11835         struct bnxt_coal *coal;
11836
11837         /* Tick values in microseconds.
11838          * 1 coal_buf x bufs_per_record = 1 completion record.
11839          */
11840         coal = &bp->rx_coal;
11841         coal->coal_ticks = 10;
11842         coal->coal_bufs = 30;
11843         coal->coal_ticks_irq = 1;
11844         coal->coal_bufs_irq = 2;
11845         coal->idle_thresh = 50;
11846         coal->bufs_per_record = 2;
11847         coal->budget = 64;              /* NAPI budget */
11848
11849         coal = &bp->tx_coal;
11850         coal->coal_ticks = 28;
11851         coal->coal_bufs = 30;
11852         coal->coal_ticks_irq = 2;
11853         coal->coal_bufs_irq = 2;
11854         coal->bufs_per_record = 1;
11855
11856         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11857 }
11858
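      /* Firmware init phase 1: query the firmware version (attempting recovery
       * if the firmware is not responding), read the NVM config version, reset
       * the function and set the firmware's real-time clock.
       */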
11859 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11860 {
11861         int rc;
11862
11863         bp->fw_cap = 0;
11864         rc = bnxt_hwrm_ver_get(bp);
11865         bnxt_try_map_fw_health_reg(bp);
11866         if (rc) {
11867                 rc = bnxt_try_recover_fw(bp);
11868                 if (rc)
11869                         return rc;
11870                 rc = bnxt_hwrm_ver_get(bp);
11871                 if (rc)
11872                         return rc;
11873         }
11874
11875         bnxt_nvm_cfg_ver_get(bp);
11876
11877         rc = bnxt_hwrm_func_reset(bp);
11878         if (rc)
11879                 return -ENODEV;
11880
11881         bnxt_hwrm_fw_set_time(bp);
11882         return 0;
11883 }
11884
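      /* Firmware init phase 2: query function, flow-management and error
       * recovery capabilities, register the driver with the firmware and query
       * the remaining device capabilities.
       */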
11885 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11886 {
11887         int rc;
11888
11889         /* Get the MAX capabilities for this function */
11890         rc = bnxt_hwrm_func_qcaps(bp);
11891         if (rc) {
11892                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11893                            rc);
11894                 return -ENODEV;
11895         }
11896
11897         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11898         if (rc)
11899                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11900                             rc);
11901
11902         if (bnxt_alloc_fw_health(bp)) {
11903                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11904         } else {
11905                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11906                 if (rc)
11907                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11908                                     rc);
11909         }
11910
11911         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11912         if (rc)
11913                 return -ENODEV;
11914
11915         bnxt_hwrm_func_qcfg(bp);
11916         bnxt_hwrm_vnic_qcaps(bp);
11917         bnxt_hwrm_port_led_qcaps(bp);
11918         bnxt_ethtool_init(bp);
11919         bnxt_dcb_init(bp);
11920         return 0;
11921 }
11922
11923 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11924 {
11925         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11926         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11927                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11928                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11929                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11930         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11931                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11932                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11933                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11934         }
11935 }
11936
11937 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11938 {
11939         struct net_device *dev = bp->dev;
11940
11941         dev->hw_features &= ~NETIF_F_NTUPLE;
11942         dev->features &= ~NETIF_F_NTUPLE;
11943         bp->flags &= ~BNXT_FLAG_RFS;
11944         if (bnxt_rfs_supported(bp)) {
11945                 dev->hw_features |= NETIF_F_NTUPLE;
11946                 if (bnxt_rfs_capable(bp)) {
11947                         bp->flags |= BNXT_FLAG_RFS;
11948                         dev->features |= NETIF_F_NTUPLE;
11949                 }
11950         }
11951 }
11952
11953 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11954 {
11955         struct pci_dev *pdev = bp->pdev;
11956
11957         bnxt_set_dflt_rss_hash_type(bp);
11958         bnxt_set_dflt_rfs(bp);
11959
11960         bnxt_get_wol_settings(bp);
11961         if (bp->flags & BNXT_FLAG_WOL_CAP)
11962                 device_set_wakeup_enable(&pdev->dev, bp->wol);
11963         else
11964                 device_set_wakeup_capable(&pdev->dev, false);
11965
11966         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11967         bnxt_hwrm_coal_params_qcaps(bp);
11968 }
11969
11970 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11971
11972 static int bnxt_fw_init_one(struct bnxt *bp)
11973 {
11974         int rc;
11975
11976         rc = bnxt_fw_init_one_p1(bp);
11977         if (rc) {
11978                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11979                 return rc;
11980         }
11981         rc = bnxt_fw_init_one_p2(bp);
11982         if (rc) {
11983                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11984                 return rc;
11985         }
11986         rc = bnxt_probe_phy(bp, false);
11987         if (rc)
11988                 return rc;
11989         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11990         if (rc)
11991                 return rc;
11992
11993         /* In case fw capabilities have changed, destroy the reporters that
11994          * are no longer needed and create the ones that are now supported.
11995          */
11996         bnxt_dl_fw_reporters_destroy(bp, false);
11997         bnxt_dl_fw_reporters_create(bp);
11998         bnxt_fw_init_one_p3(bp);
11999         return 0;
12000 }
12001
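      /* Execute one step of the host-driven firmware reset sequence: write the
       * step's value to a PCI config, GRC or BAR register, then delay if
       * required.  GRC registers are reached through the window mapped at
       * BAR0 + 0x2000; the config space read before the delay is presumably
       * there to flush the posted write.
       */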
12002 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12003 {
12004         struct bnxt_fw_health *fw_health = bp->fw_health;
12005         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12006         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12007         u32 reg_type, reg_off, delay_msecs;
12008
12009         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12010         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12011         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12012         switch (reg_type) {
12013         case BNXT_FW_HEALTH_REG_TYPE_CFG:
12014                 pci_write_config_dword(bp->pdev, reg_off, val);
12015                 break;
12016         case BNXT_FW_HEALTH_REG_TYPE_GRC:
12017                 writel(reg_off & BNXT_GRC_BASE_MASK,
12018                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12019                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12020                 fallthrough;
12021         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12022                 writel(val, bp->bar0 + reg_off);
12023                 break;
12024         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12025                 writel(val, bp->bar1 + reg_off);
12026                 break;
12027         }
12028         if (delay_msecs) {
12029                 pci_read_config_dword(bp->pdev, 0, &val);
12030                 msleep(delay_msecs);
12031         }
12032 }
12033
12034 static void bnxt_reset_all(struct bnxt *bp)
12035 {
12036         struct bnxt_fw_health *fw_health = bp->fw_health;
12037         int i, rc;
12038
12039         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12040                 bnxt_fw_reset_via_optee(bp);
12041                 bp->fw_reset_timestamp = jiffies;
12042                 return;
12043         }
12044
12045         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12046                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12047                         bnxt_fw_reset_writel(bp, i);
12048         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12049                 struct hwrm_fw_reset_input *req;
12050
12051                 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12052                 if (!rc) {
12053                         req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12054                         req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12055                         req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12056                         req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12057                         rc = hwrm_req_send(bp, req);
12058                 }
12059                 if (rc != -ENODEV)
12060                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12061         }
12062         bp->fw_reset_timestamp = jiffies;
12063 }
12064
12065 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12066 {
12067         return time_after(jiffies, bp->fw_reset_timestamp +
12068                           (bp->fw_reset_max_dsecs * HZ / 10));
12069 }
12070
12071 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12072 {
12073         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12074         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12075                 bnxt_ulp_start(bp, rc);
12076                 bnxt_dl_health_status_update(bp, false);
12077         }
12078         bp->fw_reset_state = 0;
12079         dev_close(bp->dev);
12080 }
12081
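      /* Firmware reset state machine, run from delayed work.  Typical flow:
       * POLL_VF (wait for VFs to unregister) -> POLL_FW_DOWN or RESET_FW ->
       * ENABLE_DEV (re-enable the PCI device) -> POLL_FW (wait for firmware to
       * respond) -> OPENING (reopen the netdev).  Each state re-queues the
       * work until it can make progress, aborting on timeout.
       */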
12082 static void bnxt_fw_reset_task(struct work_struct *work)
12083 {
12084         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12085         int rc = 0;
12086
12087         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12088                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12089                 return;
12090         }
12091
12092         switch (bp->fw_reset_state) {
12093         case BNXT_FW_RESET_STATE_POLL_VF: {
12094                 int n = bnxt_get_registered_vfs(bp);
12095                 int tmo;
12096
12097                 if (n < 0) {
12098                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12099                                    n, jiffies_to_msecs(jiffies -
12100                                    bp->fw_reset_timestamp));
12101                         goto fw_reset_abort;
12102                 } else if (n > 0) {
12103                         if (bnxt_fw_reset_timeout(bp)) {
12104                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12105                                 bp->fw_reset_state = 0;
12106                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12107                                            n);
12108                                 return;
12109                         }
12110                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12111                         return;
12112                 }
12113                 bp->fw_reset_timestamp = jiffies;
12114                 rtnl_lock();
12115                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12116                         bnxt_fw_reset_abort(bp, rc);
12117                         rtnl_unlock();
12118                         return;
12119                 }
12120                 bnxt_fw_reset_close(bp);
12121                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12122                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12123                         tmo = HZ / 10;
12124                 } else {
12125                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12126                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12127                 }
12128                 rtnl_unlock();
12129                 bnxt_queue_fw_reset_work(bp, tmo);
12130                 return;
12131         }
12132         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12133                 u32 val;
12134
12135                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12136                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12137                     !bnxt_fw_reset_timeout(bp)) {
12138                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12139                         return;
12140                 }
12141
12142                 if (!bp->fw_health->master) {
12143                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12144
12145                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12146                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12147                         return;
12148                 }
12149                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12150         }
12151                 fallthrough;
12152         case BNXT_FW_RESET_STATE_RESET_FW:
12153                 bnxt_reset_all(bp);
12154                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12155                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12156                 return;
12157         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12158                 bnxt_inv_fw_health_reg(bp);
12159                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12160                     !bp->fw_reset_min_dsecs) {
12161                         u16 val;
12162
12163                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12164                         if (val == 0xffff) {
12165                                 if (bnxt_fw_reset_timeout(bp)) {
12166                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12167                                         rc = -ETIMEDOUT;
12168                                         goto fw_reset_abort;
12169                                 }
12170                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12171                                 return;
12172                         }
12173                 }
12174                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12175                 if (pci_enable_device(bp->pdev)) {
12176                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12177                         rc = -ENODEV;
12178                         goto fw_reset_abort;
12179                 }
12180                 pci_set_master(bp->pdev);
12181                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12182                 fallthrough;
12183         case BNXT_FW_RESET_STATE_POLL_FW:
12184                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12185                 rc = bnxt_hwrm_poll(bp);
12186                 if (rc) {
12187                         if (bnxt_fw_reset_timeout(bp)) {
12188                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12189                                 goto fw_reset_abort_status;
12190                         }
12191                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12192                         return;
12193                 }
12194                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12195                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12196                 fallthrough;
12197         case BNXT_FW_RESET_STATE_OPENING:
12198                 while (!rtnl_trylock()) {
12199                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12200                         return;
12201                 }
12202                 rc = bnxt_open(bp->dev);
12203                 if (rc) {
12204                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12205                         bnxt_fw_reset_abort(bp, rc);
12206                         rtnl_unlock();
12207                         return;
12208                 }
12209
12210                 bp->fw_reset_state = 0;
12211                 /* Make sure fw_reset_state is 0 before clearing the flag */
12212                 smp_mb__before_atomic();
12213                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12214                 bnxt_ulp_start(bp, 0);
12215                 bnxt_reenable_sriov(bp);
12216                 bnxt_vf_reps_alloc(bp);
12217                 bnxt_vf_reps_open(bp);
12218                 bnxt_ptp_reapply_pps(bp);
12219                 bnxt_dl_health_recovery_done(bp);
12220                 bnxt_dl_health_status_update(bp, true);
12221                 rtnl_unlock();
12222                 break;
12223         }
12224         return;
12225
12226 fw_reset_abort_status:
12227         if (bp->fw_health->status_reliable ||
12228             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12229                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12230
12231                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12232         }
12233 fw_reset_abort:
12234         rtnl_lock();
12235         bnxt_fw_reset_abort(bp, rc);
12236         rtnl_unlock();
12237 }
12238
12239 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12240 {
12241         int rc;
12242         struct bnxt *bp = netdev_priv(dev);
12243
12244         SET_NETDEV_DEV(dev, &pdev->dev);
12245
12246         /* enable device (incl. PCI PM wakeup), and bus-mastering */
12247         rc = pci_enable_device(pdev);
12248         if (rc) {
12249                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12250                 goto init_err;
12251         }
12252
12253         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12254                 dev_err(&pdev->dev,
12255                         "Cannot find PCI device base address, aborting\n");
12256                 rc = -ENODEV;
12257                 goto init_err_disable;
12258         }
12259
12260         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12261         if (rc) {
12262                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12263                 goto init_err_disable;
12264         }
12265
12266         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12267             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12268                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12269                 rc = -EIO;
12270                 goto init_err_release;
12271         }
12272
12273         pci_set_master(pdev);
12274
12275         bp->dev = dev;
12276         bp->pdev = pdev;
12277
12278         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12279          * determines the BAR size.
12280          */
12281         bp->bar0 = pci_ioremap_bar(pdev, 0);
12282         if (!bp->bar0) {
12283                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12284                 rc = -ENOMEM;
12285                 goto init_err_release;
12286         }
12287
12288         bp->bar2 = pci_ioremap_bar(pdev, 4);
12289         if (!bp->bar2) {
12290                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12291                 rc = -ENOMEM;
12292                 goto init_err_release;
12293         }
12294
12295         pci_enable_pcie_error_reporting(pdev);
12296
12297         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12298         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12299
12300         spin_lock_init(&bp->ntp_fltr_lock);
12301 #if BITS_PER_LONG == 32
12302         spin_lock_init(&bp->db_lock);
12303 #endif
12304
12305         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12306         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12307
12308         bnxt_init_dflt_coal(bp);
12309
12310         timer_setup(&bp->timer, bnxt_timer, 0);
12311         bp->current_interval = BNXT_TIMER_INTERVAL;
12312
12313         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12314         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12315
12316         clear_bit(BNXT_STATE_OPEN, &bp->state);
12317         return 0;
12318
12319 init_err_release:
12320         bnxt_unmap_bars(bp, pdev);
12321         pci_release_regions(pdev);
12322
12323 init_err_disable:
12324         pci_disable_device(pdev);
12325
12326 init_err:
12327         return rc;
12328 }
12329
12330 /* rtnl_lock held */
12331 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12332 {
12333         struct sockaddr *addr = p;
12334         struct bnxt *bp = netdev_priv(dev);
12335         int rc = 0;
12336
12337         if (!is_valid_ether_addr(addr->sa_data))
12338                 return -EADDRNOTAVAIL;
12339
12340         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12341                 return 0;
12342
12343         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12344         if (rc)
12345                 return rc;
12346
12347         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12348         if (netif_running(dev)) {
12349                 bnxt_close_nic(bp, false, false);
12350                 rc = bnxt_open_nic(bp, false, false);
12351         }
12352
12353         return rc;
12354 }
12355
12356 /* rtnl_lock held */
12357 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12358 {
12359         struct bnxt *bp = netdev_priv(dev);
12360
12361         if (netif_running(dev))
12362                 bnxt_close_nic(bp, true, false);
12363
12364         dev->mtu = new_mtu;
12365         bnxt_set_ring_params(bp);
12366
12367         if (netif_running(dev))
12368                 return bnxt_open_nic(bp, true, false);
12369
12370         return 0;
12371 }
12372
12373 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12374 {
12375         struct bnxt *bp = netdev_priv(dev);
12376         bool sh = false;
12377         int rc;
12378
12379         if (tc > bp->max_tc) {
12380                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12381                            tc, bp->max_tc);
12382                 return -EINVAL;
12383         }
12384
12385         if (netdev_get_num_tc(dev) == tc)
12386                 return 0;
12387
12388         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12389                 sh = true;
12390
12391         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12392                               sh, tc, bp->tx_nr_rings_xdp);
12393         if (rc)
12394                 return rc;
12395
12396         /* Need to close the device and redo hw resource allocations */
12397         if (netif_running(bp->dev))
12398                 bnxt_close_nic(bp, true, false);
12399
12400         if (tc) {
12401                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12402                 netdev_set_num_tc(dev, tc);
12403         } else {
12404                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12405                 netdev_reset_tc(dev);
12406         }
12407         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12408         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12409                                bp->tx_nr_rings + bp->rx_nr_rings;
12410
12411         if (netif_running(bp->dev))
12412                 return bnxt_open_nic(bp, true, false);
12413
12414         return 0;
12415 }
12416
12417 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12418                                   void *cb_priv)
12419 {
12420         struct bnxt *bp = cb_priv;
12421
12422         if (!bnxt_tc_flower_enabled(bp) ||
12423             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12424                 return -EOPNOTSUPP;
12425
12426         switch (type) {
12427         case TC_SETUP_CLSFLOWER:
12428                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12429         default:
12430                 return -EOPNOTSUPP;
12431         }
12432 }
12433
12434 LIST_HEAD(bnxt_block_cb_list);
12435
12436 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12437                          void *type_data)
12438 {
12439         struct bnxt *bp = netdev_priv(dev);
12440
12441         switch (type) {
12442         case TC_SETUP_BLOCK:
12443                 return flow_block_cb_setup_simple(type_data,
12444                                                   &bnxt_block_cb_list,
12445                                                   bnxt_setup_tc_block_cb,
12446                                                   bp, bp, true);
12447         case TC_SETUP_QDISC_MQPRIO: {
12448                 struct tc_mqprio_qopt *mqprio = type_data;
12449
12450                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12451
12452                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12453         }
12454         default:
12455                 return -EOPNOTSUPP;
12456         }
12457 }
12458
12459 #ifdef CONFIG_RFS_ACCEL
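      /* Accelerated RFS support.  Two ntuple filters match if they describe
       * the same flow: same MAC addresses, IP protocol, IP addresses, ports
       * and dissector control flags.
       */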
12460 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12461                             struct bnxt_ntuple_filter *f2)
12462 {
12463         struct flow_keys *keys1 = &f1->fkeys;
12464         struct flow_keys *keys2 = &f2->fkeys;
12465
12466         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12467             keys1->basic.ip_proto != keys2->basic.ip_proto)
12468                 return false;
12469
12470         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12471                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12472                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12473                         return false;
12474         } else {
12475                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12476                            sizeof(keys1->addrs.v6addrs.src)) ||
12477                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12478                            sizeof(keys1->addrs.v6addrs.dst)))
12479                         return false;
12480         }
12481
12482         if (keys1->ports.ports == keys2->ports.ports &&
12483             keys1->control.flags == keys2->control.flags &&
12484             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12485             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12486                 return true;
12487
12488         return false;
12489 }
12490
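      /* .ndo_rx_flow_steer() handler.  Dissect the flow from the skb, allocate
       * an ntuple filter for it, add it to the driver's hash table and
       * schedule the slow-path task to program it into the hardware.  Returns
       * the software filter ID on success.
       */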
12491 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12492                               u16 rxq_index, u32 flow_id)
12493 {
12494         struct bnxt *bp = netdev_priv(dev);
12495         struct bnxt_ntuple_filter *fltr, *new_fltr;
12496         struct flow_keys *fkeys;
12497         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12498         int rc = 0, idx, bit_id, l2_idx = 0;
12499         struct hlist_head *head;
12500         u32 flags;
12501
12502         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12503                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12504                 int off = 0, j;
12505
12506                 netif_addr_lock_bh(dev);
12507                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12508                         if (ether_addr_equal(eth->h_dest,
12509                                              vnic->uc_list + off)) {
12510                                 l2_idx = j + 1;
12511                                 break;
12512                         }
12513                 }
12514                 netif_addr_unlock_bh(dev);
12515                 if (!l2_idx)
12516                         return -EINVAL;
12517         }
12518         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12519         if (!new_fltr)
12520                 return -ENOMEM;
12521
12522         fkeys = &new_fltr->fkeys;
12523         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12524                 rc = -EPROTONOSUPPORT;
12525                 goto err_free;
12526         }
12527
12528         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12529              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12530             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12531              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12532                 rc = -EPROTONOSUPPORT;
12533                 goto err_free;
12534         }
12535         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12536             bp->hwrm_spec_code < 0x10601) {
12537                 rc = -EPROTONOSUPPORT;
12538                 goto err_free;
12539         }
12540         flags = fkeys->control.flags;
12541         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12542              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12543                 rc = -EPROTONOSUPPORT;
12544                 goto err_free;
12545         }
12546
12547         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12548         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12549
12550         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12551         head = &bp->ntp_fltr_hash_tbl[idx];
12552         rcu_read_lock();
12553         hlist_for_each_entry_rcu(fltr, head, hash) {
12554                 if (bnxt_fltr_match(fltr, new_fltr)) {
12555                         rcu_read_unlock();
12556                         rc = 0;
12557                         goto err_free;
12558                 }
12559         }
12560         rcu_read_unlock();
12561
12562         spin_lock_bh(&bp->ntp_fltr_lock);
12563         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12564                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12565         if (bit_id < 0) {
12566                 spin_unlock_bh(&bp->ntp_fltr_lock);
12567                 rc = -ENOMEM;
12568                 goto err_free;
12569         }
12570
12571         new_fltr->sw_id = (u16)bit_id;
12572         new_fltr->flow_id = flow_id;
12573         new_fltr->l2_fltr_idx = l2_idx;
12574         new_fltr->rxq = rxq_index;
12575         hlist_add_head_rcu(&new_fltr->hash, head);
12576         bp->ntp_fltr_count++;
12577         spin_unlock_bh(&bp->ntp_fltr_lock);
12578
12579         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12580         bnxt_queue_sp_work(bp);
12581
12582         return new_fltr->sw_id;
12583
12584 err_free:
12585         kfree(new_fltr);
12586         return rc;
12587 }
12588
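/* Worker that reconciles the ntuple filter table with the hardware. It is
 * scheduled from bnxt_rx_flow_steer() via the BNXT_RX_NTP_FLTR_SP_EVENT
 * bit: filters not yet marked BNXT_FLTR_VALID are programmed through HWRM,
 * and valid filters whose flow the stack reports as expired are freed in
 * hardware and removed from the hash table.
 */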
12589 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12590 {
12591         int i;
12592
12593         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12594                 struct hlist_head *head;
12595                 struct hlist_node *tmp;
12596                 struct bnxt_ntuple_filter *fltr;
12597                 int rc;
12598
12599                 head = &bp->ntp_fltr_hash_tbl[i];
12600                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12601                         bool del = false;
12602
12603                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12604                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12605                                                         fltr->flow_id,
12606                                                         fltr->sw_id)) {
12607                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
12608                                                                          fltr);
12609                                         del = true;
12610                                 }
12611                         } else {
12612                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12613                                                                        fltr);
12614                                 if (rc)
12615                                         del = true;
12616                                 else
12617                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
12618                         }
12619
12620                         if (del) {
12621                                 spin_lock_bh(&bp->ntp_fltr_lock);
12622                                 hlist_del_rcu(&fltr->hash);
12623                                 bp->ntp_fltr_count--;
12624                                 spin_unlock_bh(&bp->ntp_fltr_lock);
12625                                 synchronize_rcu();
12626                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12627                                 kfree(fltr);
12628                         }
12629                 }
12630         }
12631         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12632                 netdev_info(bp->dev, "Received PF driver unload event!\n");
12633 }
12634
12635 #else
12636
12637 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12638 {
12639 }
12640
12641 #endif /* CONFIG_RFS_ACCEL */
12642
12643 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12644 {
12645         struct bnxt *bp = netdev_priv(netdev);
12646         struct udp_tunnel_info ti;
12647         unsigned int cmd;
12648
12649         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12650         if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
12651                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12652         else
12653                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12654
12655         if (ti.port)
12656                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12657
12658         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12659 }
12660
12661 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12662         .sync_table     = bnxt_udp_tunnel_sync,
12663         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12664                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12665         .tables         = {
12666                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
12667                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12668         },
12669 };
12670
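/* With one single-entry table per tunnel type above, the udp_tunnel_nic
 * core tracks at most one VXLAN and one GENEVE destination port and calls
 * bnxt_udp_tunnel_sync() whenever an entry changes: a non-zero port means
 * the firmware tunnel port should be allocated, a zero port means it should
 * be freed.  UDP_TUNNEL_NIC_INFO_OPEN_ONLY restricts programming to when
 * the device is open, and MAY_SLEEP lets the callback issue sleeping HWRM
 * requests.
 */
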
12671 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12672                                struct net_device *dev, u32 filter_mask,
12673                                int nlflags)
12674 {
12675         struct bnxt *bp = netdev_priv(dev);
12676
12677         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12678                                        nlflags, filter_mask, NULL);
12679 }
12680
12681 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12682                                u16 flags, struct netlink_ext_ack *extack)
12683 {
12684         struct bnxt *bp = netdev_priv(dev);
12685         struct nlattr *attr, *br_spec;
12686         int rem, rc = 0;
12687
12688         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12689                 return -EOPNOTSUPP;
12690
12691         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12692         if (!br_spec)
12693                 return -EINVAL;
12694
12695         nla_for_each_nested(attr, br_spec, rem) {
12696                 u16 mode;
12697
12698                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12699                         continue;
12700
12701                 if (nla_len(attr) < sizeof(mode))
12702                         return -EINVAL;
12703
12704                 mode = nla_get_u16(attr);
12705                 if (mode == bp->br_mode)
12706                         break;
12707
12708                 rc = bnxt_hwrm_set_br_mode(bp, mode);
12709                 if (!rc)
12710                         bp->br_mode = mode;
12711                 break;
12712         }
12713         return rc;
12714 }
12715
12716 int bnxt_get_port_parent_id(struct net_device *dev,
12717                             struct netdev_phys_item_id *ppid)
12718 {
12719         struct bnxt *bp = netdev_priv(dev);
12720
12721         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12722                 return -EOPNOTSUPP;
12723
12724         /* The PF and its VF-reps only support the switchdev framework */
12725         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12726                 return -EOPNOTSUPP;
12727
12728         ppid->id_len = sizeof(bp->dsn);
12729         memcpy(ppid->id, bp->dsn, ppid->id_len);
12730
12731         return 0;
12732 }
12733
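/* The DSN copied above serves as the switchdev port parent ID, identifying
 * ports that belong to the same physical adapter; it is only usable when
 * BNXT_FLAG_DSN_VALID was set by bnxt_pcie_dsn_get().
 */
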
12734 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12735 {
12736         struct bnxt *bp = netdev_priv(dev);
12737
12738         return &bp->dl_port;
12739 }
12740
12741 static const struct net_device_ops bnxt_netdev_ops = {
12742         .ndo_open               = bnxt_open,
12743         .ndo_start_xmit         = bnxt_start_xmit,
12744         .ndo_stop               = bnxt_close,
12745         .ndo_get_stats64        = bnxt_get_stats64,
12746         .ndo_set_rx_mode        = bnxt_set_rx_mode,
12747         .ndo_eth_ioctl          = bnxt_ioctl,
12748         .ndo_validate_addr      = eth_validate_addr,
12749         .ndo_set_mac_address    = bnxt_change_mac_addr,
12750         .ndo_change_mtu         = bnxt_change_mtu,
12751         .ndo_fix_features       = bnxt_fix_features,
12752         .ndo_set_features       = bnxt_set_features,
12753         .ndo_features_check     = bnxt_features_check,
12754         .ndo_tx_timeout         = bnxt_tx_timeout,
12755 #ifdef CONFIG_BNXT_SRIOV
12756         .ndo_get_vf_config      = bnxt_get_vf_config,
12757         .ndo_set_vf_mac         = bnxt_set_vf_mac,
12758         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
12759         .ndo_set_vf_rate        = bnxt_set_vf_bw,
12760         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
12761         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
12762         .ndo_set_vf_trust       = bnxt_set_vf_trust,
12763 #endif
12764         .ndo_setup_tc           = bnxt_setup_tc,
12765 #ifdef CONFIG_RFS_ACCEL
12766         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
12767 #endif
12768         .ndo_bpf                = bnxt_xdp,
12769         .ndo_xdp_xmit           = bnxt_xdp_xmit,
12770         .ndo_bridge_getlink     = bnxt_bridge_getlink,
12771         .ndo_bridge_setlink     = bnxt_bridge_setlink,
12772         .ndo_get_devlink_port   = bnxt_get_devlink_port,
12773 };
12774
12775 static void bnxt_remove_one(struct pci_dev *pdev)
12776 {
12777         struct net_device *dev = pci_get_drvdata(pdev);
12778         struct bnxt *bp = netdev_priv(dev);
12779
12780         if (BNXT_PF(bp))
12781                 bnxt_sriov_disable(bp);
12782
12783         if (BNXT_PF(bp))
12784                 devlink_port_type_clear(&bp->dl_port);
12785
12786         bnxt_ptp_clear(bp);
12787         pci_disable_pcie_error_reporting(pdev);
12788         unregister_netdev(dev);
12789         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12790         /* Flush any pending tasks */
12791         cancel_work_sync(&bp->sp_task);
12792         cancel_delayed_work_sync(&bp->fw_reset_task);
12793         bp->sp_event = 0;
12794
12795         bnxt_dl_fw_reporters_destroy(bp, true);
12796         bnxt_dl_unregister(bp);
12797         bnxt_shutdown_tc(bp);
12798
12799         bnxt_clear_int_mode(bp);
12800         bnxt_hwrm_func_drv_unrgtr(bp);
12801         bnxt_free_hwrm_resources(bp);
12802         bnxt_ethtool_free(bp);
12803         bnxt_dcb_free(bp);
12804         kfree(bp->edev);
12805         bp->edev = NULL;
12806         kfree(bp->ptp_cfg);
12807         bp->ptp_cfg = NULL;
12808         kfree(bp->fw_health);
12809         bp->fw_health = NULL;
12810         bnxt_cleanup_pci(bp);
12811         bnxt_free_ctx_mem(bp);
12812         kfree(bp->ctx);
12813         bp->ctx = NULL;
12814         kfree(bp->rss_indir_tbl);
12815         bp->rss_indir_tbl = NULL;
12816         bnxt_free_port_stats(bp);
12817         free_netdev(dev);
12818 }
12819
12820 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12821 {
12822         int rc = 0;
12823         struct bnxt_link_info *link_info = &bp->link_info;
12824
12825         bp->phy_flags = 0;
12826         rc = bnxt_hwrm_phy_qcaps(bp);
12827         if (rc) {
12828                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12829                            rc);
12830                 return rc;
12831         }
12832         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12833                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12834         else
12835                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12836         if (!fw_dflt)
12837                 return 0;
12838
12839         mutex_lock(&bp->link_lock);
12840         rc = bnxt_update_link(bp, false);
12841         if (rc) {
12842                 mutex_unlock(&bp->link_lock);
12843                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12844                            rc);
12845                 return rc;
12846         }
12847
12848         /* Older firmware does not have supported_auto_speeds, so assume
12849          * that all supported speeds can be autonegotiated.
12850          */
12851         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12852                 link_info->support_auto_speeds = link_info->support_speeds;
12853
12854         bnxt_init_ethtool_link_settings(bp);
12855         mutex_unlock(&bp->link_lock);
12856         return 0;
12857 }
12858
12859 static int bnxt_get_max_irq(struct pci_dev *pdev)
12860 {
12861         u16 ctrl;
12862
12863         if (!pdev->msix_cap)
12864                 return 1;
12865
12866         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12867         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12868 }
12869
12870 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12871                                 int *max_cp)
12872 {
12873         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12874         int max_ring_grps = 0, max_irq;
12875
12876         *max_tx = hw_resc->max_tx_rings;
12877         *max_rx = hw_resc->max_rx_rings;
12878         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12879         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12880                         bnxt_get_ulp_msix_num(bp),
12881                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12882         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12883                 *max_cp = min_t(int, *max_cp, max_irq);
12884         max_ring_grps = hw_resc->max_hw_ring_grps;
12885         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12886                 *max_cp -= 1;
12887                 *max_rx -= 2;
12888         }
12889         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12890                 *max_rx >>= 1;
12891         if (bp->flags & BNXT_FLAG_CHIP_P5) {
12892                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12893                 /* On P5 chips, the max_cp output parameter is the available NQ count */
12894                 *max_cp = max_irq;
12895         }
12896         *max_rx = min_t(int, *max_rx, max_ring_grps);
12897 }
12898
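/* The helper above reports the hardware limits for this function: RX/TX
 * ring counts come straight from the firmware-reported resources, the
 * completion ring count is additionally bounded by the MSI-X vectors and
 * stat contexts left after the RoCE/ULP driver takes its share, RX is
 * halved when aggregation rings are enabled (each RX ring then needs two
 * hardware rings), and on P5 chips *max_cp reports NQs rather than
 * completion rings.
 */
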
12899 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12900 {
12901         int rx, tx, cp;
12902
12903         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12904         *max_rx = rx;
12905         *max_tx = tx;
12906         if (!rx || !tx || !cp)
12907                 return -ENOMEM;
12908
12909         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12910 }
12911
12912 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12913                                bool shared)
12914 {
12915         int rc;
12916
12917         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12918         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12919                 /* Not enough rings, try disabling agg rings. */
12920                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12921                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12922                 if (rc) {
12923                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
12924                         bp->flags |= BNXT_FLAG_AGG_RINGS;
12925                         return rc;
12926                 }
12927                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12928                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12929                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12930                 bnxt_set_ring_params(bp);
12931         }
12932
12933         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12934                 int max_cp, max_stat, max_irq;
12935
12936                 /* Reserve minimum resources for RoCE */
12937                 max_cp = bnxt_get_max_func_cp_rings(bp);
12938                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12939                 max_irq = bnxt_get_max_func_irqs(bp);
12940                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12941                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12942                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12943                         return 0;
12944
12945                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12946                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12947                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12948                 max_cp = min_t(int, max_cp, max_irq);
12949                 max_cp = min_t(int, max_cp, max_stat);
12950                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12951                 if (rc)
12952                         rc = 0;
12953         }
12954         return rc;
12955 }
12956
12957 /* In the initial default shared ring setting, each shared ring must have
12958  * an RX/TX ring pair.
12959  */
12960 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12961 {
12962         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12963         bp->rx_nr_rings = bp->cp_nr_rings;
12964         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12965         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12966 }
12967
12968 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12969 {
12970         int dflt_rings, max_rx_rings, max_tx_rings, rc;
12971
12972         if (!bnxt_can_reserve_rings(bp))
12973                 return 0;
12974
12975         if (sh)
12976                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12977         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12978         /* Reduce default rings on multi-port cards so that total default
12979          * rings do not exceed CPU count.
12980          */
12981         if (bp->port_count > 1) {
12982                 int max_rings =
12983                         max_t(int, num_online_cpus() / bp->port_count, 1);
12984
12985                 dflt_rings = min_t(int, dflt_rings, max_rings);
12986         }
12987         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12988         if (rc)
12989                 return rc;
12990         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12991         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12992         if (sh)
12993                 bnxt_trim_dflt_sh_rings(bp);
12994         else
12995                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12996         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12997
12998         rc = __bnxt_reserve_rings(bp);
12999         if (rc)
13000                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13001         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13002         if (sh)
13003                 bnxt_trim_dflt_sh_rings(bp);
13004
13005         /* Rings may have been trimmed, re-reserve the trimmed rings. */
13006         if (bnxt_need_reserve_rings(bp)) {
13007                 rc = __bnxt_reserve_rings(bp);
13008                 if (rc)
13009                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13010                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13011         }
13012         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13013                 bp->rx_nr_rings++;
13014                 bp->cp_nr_rings++;
13015         }
13016         if (rc) {
13017                 bp->tx_nr_rings = 0;
13018                 bp->rx_nr_rings = 0;
13019         }
13020         return rc;
13021 }
13022
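/* Default ring sizing above: start from netif_get_num_default_rss_queues()
 * (or a single ring under kdump), cap that by the per-port share of online
 * CPUs and by the hardware limits, then reserve the rings with firmware,
 * re-trimming and re-reserving if the firmware grants fewer rings than
 * requested.
 */
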
13023 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13024 {
13025         int rc;
13026
13027         if (bp->tx_nr_rings)
13028                 return 0;
13029
13030         bnxt_ulp_irq_stop(bp);
13031         bnxt_clear_int_mode(bp);
13032         rc = bnxt_set_dflt_rings(bp, true);
13033         if (rc) {
13034                 netdev_err(bp->dev, "Not enough rings available.\n");
13035                 goto init_dflt_ring_err;
13036         }
13037         rc = bnxt_init_int_mode(bp);
13038         if (rc)
13039                 goto init_dflt_ring_err;
13040
13041         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13042         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13043                 bp->flags |= BNXT_FLAG_RFS;
13044                 bp->dev->features |= NETIF_F_NTUPLE;
13045         }
13046 init_dflt_ring_err:
13047         bnxt_ulp_irq_restart(bp, rc);
13048         return rc;
13049 }
13050
13051 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13052 {
13053         int rc;
13054
13055         ASSERT_RTNL();
13056         bnxt_hwrm_func_qcaps(bp);
13057
13058         if (netif_running(bp->dev))
13059                 __bnxt_close_nic(bp, true, false);
13060
13061         bnxt_ulp_irq_stop(bp);
13062         bnxt_clear_int_mode(bp);
13063         rc = bnxt_init_int_mode(bp);
13064         bnxt_ulp_irq_restart(bp, rc);
13065
13066         if (netif_running(bp->dev)) {
13067                 if (rc)
13068                         dev_close(bp->dev);
13069                 else
13070                         rc = bnxt_open_nic(bp, true, false);
13071         }
13072
13073         return rc;
13074 }
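/* bnxt_restore_pf_fw_resources() re-reads the function capabilities and
 * rebuilds the interrupt setup under RTNL, bouncing the interface if it is
 * running.  Its call sites are elsewhere in the driver; it appears to be
 * used when the PF's firmware resources may have been redistributed, for
 * example around SR-IOV configuration changes.
 */
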
13075
13076 static int bnxt_init_mac_addr(struct bnxt *bp)
13077 {
13078         int rc = 0;
13079
13080         if (BNXT_PF(bp)) {
13081                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13082         } else {
13083 #ifdef CONFIG_BNXT_SRIOV
13084                 struct bnxt_vf_info *vf = &bp->vf;
13085                 bool strict_approval = true;
13086
13087                 if (is_valid_ether_addr(vf->mac_addr)) {
13088                         /* overwrite netdev dev_addr with admin VF MAC */
13089                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13090                         /* Older PF driver or firmware may not approve this
13091                          * correctly.
13092                          */
13093                         strict_approval = false;
13094                 } else {
13095                         eth_hw_addr_random(bp->dev);
13096                 }
13097                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13098 #endif
13099         }
13100         return rc;
13101 }
13102
13103 static void bnxt_vpd_read_info(struct bnxt *bp)
13104 {
13105         struct pci_dev *pdev = bp->pdev;
13106         unsigned int vpd_size, kw_len;
13107         int pos, size;
13108         u8 *vpd_data;
13109
13110         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13111         if (IS_ERR(vpd_data)) {
13112                 pci_warn(pdev, "Unable to read VPD\n");
13113                 return;
13114         }
13115
13116         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13117                                            PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13118         if (pos < 0)
13119                 goto read_sn;
13120
13121         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13122         memcpy(bp->board_partno, &vpd_data[pos], size);
13123
13124 read_sn:
13125         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13126                                            PCI_VPD_RO_KEYWORD_SERIALNO,
13127                                            &kw_len);
13128         if (pos < 0)
13129                 goto exit;
13130
13131         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13132         memcpy(bp->board_serialno, &vpd_data[pos], size);
13133 exit:
13134         kfree(vpd_data);
13135 }
13136
13137 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13138 {
13139         struct pci_dev *pdev = bp->pdev;
13140         u64 qword;
13141
13142         qword = pci_get_dsn(pdev);
13143         if (!qword) {
13144                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13145                 return -EOPNOTSUPP;
13146         }
13147
13148         put_unaligned_le64(qword, dsn);
13149
13150         bp->flags |= BNXT_FLAG_DSN_VALID;
13151         return 0;
13152 }
13153
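/* bnxt_pcie_dsn_get() above reads the PCIe Device Serial Number capability
 * via pci_get_dsn() and stores it little-endian in the caller's buffer;
 * BNXT_FLAG_DSN_VALID then gates its use as the switchdev parent ID in
 * bnxt_get_port_parent_id().
 */
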
13154 static int bnxt_map_db_bar(struct bnxt *bp)
13155 {
13156         if (!bp->db_size)
13157                 return -ENODEV;
13158         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13159         if (!bp->bar1)
13160                 return -ENOMEM;
13161         return 0;
13162 }
13163
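/* BAR 2 holds the doorbell region; bp->db_size is set up earlier during
 * firmware initialization, so a zero size here means the doorbell layout is
 * unknown and the probe must fail.
 */
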
13164 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13165 {
13166         struct net_device *dev;
13167         struct bnxt *bp;
13168         int rc, max_irqs;
13169
13170         if (pci_is_bridge(pdev))
13171                 return -ENODEV;
13172
13173         /* Clear any DMA transactions left pending by the crash kernel
13174          * while loading the driver in the capture kernel.
13175          */
13176         if (is_kdump_kernel()) {
13177                 pci_clear_master(pdev);
13178                 pcie_flr(pdev);
13179         }
13180
13181         max_irqs = bnxt_get_max_irq(pdev);
13182         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13183         if (!dev)
13184                 return -ENOMEM;
13185
13186         bp = netdev_priv(dev);
13187         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13188         bnxt_set_max_func_irqs(bp, max_irqs);
13189
13190         if (bnxt_vf_pciid(ent->driver_data))
13191                 bp->flags |= BNXT_FLAG_VF;
13192
13193         if (pdev->msix_cap)
13194                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13195
13196         rc = bnxt_init_board(pdev, dev);
13197         if (rc < 0)
13198                 goto init_err_free;
13199
13200         dev->netdev_ops = &bnxt_netdev_ops;
13201         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13202         dev->ethtool_ops = &bnxt_ethtool_ops;
13203         pci_set_drvdata(pdev, dev);
13204
13205         rc = bnxt_alloc_hwrm_resources(bp);
13206         if (rc)
13207                 goto init_err_pci_clean;
13208
13209         mutex_init(&bp->hwrm_cmd_lock);
13210         mutex_init(&bp->link_lock);
13211
13212         rc = bnxt_fw_init_one_p1(bp);
13213         if (rc)
13214                 goto init_err_pci_clean;
13215
13216         if (BNXT_PF(bp))
13217                 bnxt_vpd_read_info(bp);
13218
13219         if (BNXT_CHIP_P5(bp)) {
13220                 bp->flags |= BNXT_FLAG_CHIP_P5;
13221                 if (BNXT_CHIP_SR2(bp))
13222                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13223         }
13224
13225         rc = bnxt_alloc_rss_indir_tbl(bp);
13226         if (rc)
13227                 goto init_err_pci_clean;
13228
13229         rc = bnxt_fw_init_one_p2(bp);
13230         if (rc)
13231                 goto init_err_pci_clean;
13232
13233         rc = bnxt_map_db_bar(bp);
13234         if (rc) {
13235                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13236                         rc);
13237                 goto init_err_pci_clean;
13238         }
13239
13240         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13241                            NETIF_F_TSO | NETIF_F_TSO6 |
13242                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13243                            NETIF_F_GSO_IPXIP4 |
13244                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13245                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13246                            NETIF_F_RXCSUM | NETIF_F_GRO;
13247
13248         if (BNXT_SUPPORTS_TPA(bp))
13249                 dev->hw_features |= NETIF_F_LRO;
13250
13251         dev->hw_enc_features =
13252                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13253                         NETIF_F_TSO | NETIF_F_TSO6 |
13254                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13255                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13256                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13257         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13258
13259         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13260                                     NETIF_F_GSO_GRE_CSUM;
13261         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13262         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13263                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13264         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13265                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13266         if (BNXT_SUPPORTS_TPA(bp))
13267                 dev->hw_features |= NETIF_F_GRO_HW;
13268         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13269         if (dev->features & NETIF_F_GRO_HW)
13270                 dev->features &= ~NETIF_F_LRO;
13271         dev->priv_flags |= IFF_UNICAST_FLT;
13272
13273 #ifdef CONFIG_BNXT_SRIOV
13274         init_waitqueue_head(&bp->sriov_cfg_wait);
13275         mutex_init(&bp->sriov_lock);
13276 #endif
13277         if (BNXT_SUPPORTS_TPA(bp)) {
13278                 bp->gro_func = bnxt_gro_func_5730x;
13279                 if (BNXT_CHIP_P4(bp))
13280                         bp->gro_func = bnxt_gro_func_5731x;
13281                 else if (BNXT_CHIP_P5(bp))
13282                         bp->gro_func = bnxt_gro_func_5750x;
13283         }
13284         if (!BNXT_CHIP_P4_PLUS(bp))
13285                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13286
13287         rc = bnxt_init_mac_addr(bp);
13288         if (rc) {
13289                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13290                 rc = -EADDRNOTAVAIL;
13291                 goto init_err_pci_clean;
13292         }
13293
13294         if (BNXT_PF(bp)) {
13295                 /* Read the adapter's DSN to use as the eswitch switch_id */
13296                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13297         }
13298
13299         /* MTU range: 60 - FW defined max */
13300         dev->min_mtu = ETH_ZLEN;
13301         dev->max_mtu = bp->max_mtu;
13302
13303         rc = bnxt_probe_phy(bp, true);
13304         if (rc)
13305                 goto init_err_pci_clean;
13306
13307         bnxt_set_rx_skb_mode(bp, false);
13308         bnxt_set_tpa_flags(bp);
13309         bnxt_set_ring_params(bp);
13310         rc = bnxt_set_dflt_rings(bp, true);
13311         if (rc) {
13312                 netdev_err(bp->dev, "Not enough rings available.\n");
13313                 rc = -ENOMEM;
13314                 goto init_err_pci_clean;
13315         }
13316
13317         bnxt_fw_init_one_p3(bp);
13318
13319         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13320                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13321
13322         rc = bnxt_init_int_mode(bp);
13323         if (rc)
13324                 goto init_err_pci_clean;
13325
13326         /* No TC has been set yet and rings may have been trimmed due to
13327          * limited MSIX, so we re-initialize the TX rings per TC.
13328          */
13329         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13330
13331         if (BNXT_PF(bp)) {
13332                 if (!bnxt_pf_wq) {
13333                         bnxt_pf_wq =
13334                                 create_singlethread_workqueue("bnxt_pf_wq");
13335                         if (!bnxt_pf_wq) {
13336                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13337                                 rc = -ENOMEM;
13338                                 goto init_err_pci_clean;
13339                         }
13340                 }
13341                 rc = bnxt_init_tc(bp);
13342                 if (rc)
13343                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13344                                    rc);
13345         }
13346
13347         bnxt_inv_fw_health_reg(bp);
13348         bnxt_dl_register(bp);
13349
13350         rc = register_netdev(dev);
13351         if (rc)
13352                 goto init_err_cleanup;
13353
13354         if (BNXT_PF(bp))
13355                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13356         bnxt_dl_fw_reporters_create(bp);
13357
13358         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13359                     board_info[ent->driver_data].name,
13360                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
13361         pcie_print_link_status(pdev);
13362
13363         pci_save_state(pdev);
13364         return 0;
13365
13366 init_err_cleanup:
13367         bnxt_dl_unregister(bp);
13368         bnxt_shutdown_tc(bp);
13369         bnxt_clear_int_mode(bp);
13370
13371 init_err_pci_clean:
13372         bnxt_hwrm_func_drv_unrgtr(bp);
13373         bnxt_free_hwrm_resources(bp);
13374         bnxt_ethtool_free(bp);
13375         bnxt_ptp_clear(bp);
13376         kfree(bp->ptp_cfg);
13377         bp->ptp_cfg = NULL;
13378         kfree(bp->fw_health);
13379         bp->fw_health = NULL;
13380         bnxt_cleanup_pci(bp);
13381         bnxt_free_ctx_mem(bp);
13382         kfree(bp->ctx);
13383         bp->ctx = NULL;
13384         kfree(bp->rss_indir_tbl);
13385         bp->rss_indir_tbl = NULL;
13386
13387 init_err_free:
13388         free_netdev(dev);
13389         return rc;
13390 }
13391
13392 static void bnxt_shutdown(struct pci_dev *pdev)
13393 {
13394         struct net_device *dev = pci_get_drvdata(pdev);
13395         struct bnxt *bp;
13396
13397         if (!dev)
13398                 return;
13399
13400         rtnl_lock();
13401         bp = netdev_priv(dev);
13402         if (!bp)
13403                 goto shutdown_exit;
13404
13405         if (netif_running(dev))
13406                 dev_close(dev);
13407
13408         bnxt_ulp_shutdown(bp);
13409         bnxt_clear_int_mode(bp);
13410         pci_disable_device(pdev);
13411
13412         if (system_state == SYSTEM_POWER_OFF) {
13413                 pci_wake_from_d3(pdev, bp->wol);
13414                 pci_set_power_state(pdev, PCI_D3hot);
13415         }
13416
13417 shutdown_exit:
13418         rtnl_unlock();
13419 }
13420
13421 #ifdef CONFIG_PM_SLEEP
13422 static int bnxt_suspend(struct device *device)
13423 {
13424         struct net_device *dev = dev_get_drvdata(device);
13425         struct bnxt *bp = netdev_priv(dev);
13426         int rc = 0;
13427
13428         rtnl_lock();
13429         bnxt_ulp_stop(bp);
13430         if (netif_running(dev)) {
13431                 netif_device_detach(dev);
13432                 rc = bnxt_close(dev);
13433         }
13434         bnxt_hwrm_func_drv_unrgtr(bp);
13435         pci_disable_device(bp->pdev);
13436         bnxt_free_ctx_mem(bp);
13437         kfree(bp->ctx);
13438         bp->ctx = NULL;
13439         rtnl_unlock();
13440         return rc;
13441 }
13442
13443 static int bnxt_resume(struct device *device)
13444 {
13445         struct net_device *dev = dev_get_drvdata(device);
13446         struct bnxt *bp = netdev_priv(dev);
13447         int rc = 0;
13448
13449         rtnl_lock();
13450         rc = pci_enable_device(bp->pdev);
13451         if (rc) {
13452                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13453                            rc);
13454                 goto resume_exit;
13455         }
13456         pci_set_master(bp->pdev);
13457         if (bnxt_hwrm_ver_get(bp)) {
13458                 rc = -ENODEV;
13459                 goto resume_exit;
13460         }
13461         rc = bnxt_hwrm_func_reset(bp);
13462         if (rc) {
13463                 rc = -EBUSY;
13464                 goto resume_exit;
13465         }
13466
13467         rc = bnxt_hwrm_func_qcaps(bp);
13468         if (rc)
13469                 goto resume_exit;
13470
13471         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13472                 rc = -ENODEV;
13473                 goto resume_exit;
13474         }
13475
13476         bnxt_get_wol_settings(bp);
13477         if (netif_running(dev)) {
13478                 rc = bnxt_open(dev);
13479                 if (!rc)
13480                         netif_device_attach(dev);
13481         }
13482
13483 resume_exit:
13484         bnxt_ulp_start(bp, rc);
13485         if (!rc)
13486                 bnxt_reenable_sriov(bp);
13487         rtnl_unlock();
13488         return rc;
13489 }
13490
13491 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13492 #define BNXT_PM_OPS (&bnxt_pm_ops)
13493
13494 #else
13495
13496 #define BNXT_PM_OPS NULL
13497
13498 #endif /* CONFIG_PM_SLEEP */
13499
13500 /**
13501  * bnxt_io_error_detected - called when PCI error is detected
13502  * @pdev: Pointer to PCI device
13503  * @state: The current pci connection state
13504  *
13505  * This function is called after a PCI bus error affecting
13506  * this device has been detected.
13507  */
13508 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13509                                                pci_channel_state_t state)
13510 {
13511         struct net_device *netdev = pci_get_drvdata(pdev);
13512         struct bnxt *bp = netdev_priv(netdev);
13513
13514         netdev_info(netdev, "PCI I/O error detected\n");
13515
13516         rtnl_lock();
13517         netif_device_detach(netdev);
13518
13519         bnxt_ulp_stop(bp);
13520
13521         if (state == pci_channel_io_perm_failure) {
13522                 rtnl_unlock();
13523                 return PCI_ERS_RESULT_DISCONNECT;
13524         }
13525
13526         if (state == pci_channel_io_frozen)
13527                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13528
13529         if (netif_running(netdev))
13530                 bnxt_close(netdev);
13531
13532         if (pci_is_enabled(pdev))
13533                 pci_disable_device(pdev);
13534         bnxt_free_ctx_mem(bp);
13535         kfree(bp->ctx);
13536         bp->ctx = NULL;
13537         rtnl_unlock();
13538
13539         /* Request a slot reset. */
13540         return PCI_ERS_RESULT_NEED_RESET;
13541 }
13542
13543 /**
13544  * bnxt_io_slot_reset - called after the pci bus has been reset.
13545  * @pdev: Pointer to PCI device
13546  *
13547  * Restart the card from scratch, as if from a cold-boot.
13548  * At this point, the card has experienced a hard reset,
13549  * followed by fixups by BIOS, and has its config space
13550  * set up identically to what it was at cold boot.
13551  */
13552 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13553 {
13554         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13555         struct net_device *netdev = pci_get_drvdata(pdev);
13556         struct bnxt *bp = netdev_priv(netdev);
13557         int err = 0, off;
13558
13559         netdev_info(bp->dev, "PCI Slot Reset\n");
13560
13561         rtnl_lock();
13562
13563         if (pci_enable_device(pdev)) {
13564                 dev_err(&pdev->dev,
13565                         "Cannot re-enable PCI device after reset.\n");
13566         } else {
13567                 pci_set_master(pdev);
13568                 /* Upon a fatal error, the device's internal logic that latches
13569                  * the BAR values is reset and is only restored by rewriting
13570                  * the BARs.
13571                  *
13572                  * Since pci_restore_state() does not rewrite a BAR whose value
13573                  * matches the previously saved value, the driver writes 0 to
13574                  * the BARs to force a restore after a fatal error.
13575                  */
13576                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13577                                        &bp->state)) {
13578                         for (off = PCI_BASE_ADDRESS_0;
13579                              off <= PCI_BASE_ADDRESS_5; off += 4)
13580                                 pci_write_config_dword(bp->pdev, off, 0);
13581                 }
13582                 pci_restore_state(pdev);
13583                 pci_save_state(pdev);
13584
13585                 err = bnxt_hwrm_func_reset(bp);
13586                 if (!err)
13587                         result = PCI_ERS_RESULT_RECOVERED;
13588         }
13589
13590         rtnl_unlock();
13591
13592         return result;
13593 }
13594
13595 /**
13596  * bnxt_io_resume - called when traffic can start flowing again.
13597  * @pdev: Pointer to PCI device
13598  *
13599  * This callback is called when the error recovery driver tells
13600  * us that it's OK to resume normal operation.
13601  */
13602 static void bnxt_io_resume(struct pci_dev *pdev)
13603 {
13604         struct net_device *netdev = pci_get_drvdata(pdev);
13605         struct bnxt *bp = netdev_priv(netdev);
13606         int err;
13607
13608         netdev_info(bp->dev, "PCI Slot Resume\n");
13609         rtnl_lock();
13610
13611         err = bnxt_hwrm_func_qcaps(bp);
13612         if (!err && netif_running(netdev))
13613                 err = bnxt_open(netdev);
13614
13615         bnxt_ulp_start(bp, err);
13616         if (!err) {
13617                 bnxt_reenable_sriov(bp);
13618                 netif_device_attach(netdev);
13619         }
13620
13621         rtnl_unlock();
13622 }
13623
13624 static const struct pci_error_handlers bnxt_err_handler = {
13625         .error_detected = bnxt_io_error_detected,
13626         .slot_reset     = bnxt_io_slot_reset,
13627         .resume         = bnxt_io_resume
13628 };
13629
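/* PCI error recovery sequence for the handlers above: the PCI core invokes
 * .error_detected first (the driver detaches and quiesces, requesting a
 * slot reset unless the failure is permanent), then .slot_reset after the
 * slot has been reset (re-enable the device, restore config space, reset
 * the firmware function), and finally .resume once recovery has succeeded,
 * at which point the interface is reopened and reattached.
 */
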
13630 static struct pci_driver bnxt_pci_driver = {
13631         .name           = DRV_MODULE_NAME,
13632         .id_table       = bnxt_pci_tbl,
13633         .probe          = bnxt_init_one,
13634         .remove         = bnxt_remove_one,
13635         .shutdown       = bnxt_shutdown,
13636         .driver.pm      = BNXT_PM_OPS,
13637         .err_handler    = &bnxt_err_handler,
13638 #if defined(CONFIG_BNXT_SRIOV)
13639         .sriov_configure = bnxt_sriov_configure,
13640 #endif
13641 };
13642
13643 static int __init bnxt_init(void)
13644 {
13645         bnxt_debug_init();
13646         return pci_register_driver(&bnxt_pci_driver);
13647 }
13648
13649 static void __exit bnxt_exit(void)
13650 {
13651         pci_unregister_driver(&bnxt_pci_driver);
13652         if (bnxt_pf_wq)
13653                 destroy_workqueue(bnxt_pf_wq);
13654         bnxt_debug_exit();
13655 }
13656
13657 module_init(bnxt_init);
13658 module_exit(bnxt_exit);