/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
                                 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_C_VF_HV,
        NETXTREME_E_VF_HV,
        NETXTREME_E_P5_VF,
        NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
        [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
        ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
        ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
        ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
                idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
                idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}
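
/* Illustrative sketch (editorial note, not upstream code): the helpers
 * above abstract the two doorbell formats.  On P5 chips the notification
 * queue (NQ) doorbell is a 64-bit write carrying a DBR_TYPE_* field; on
 * older chips a 32-bit completion ring doorbell is used instead.  A NAPI
 * poll routine would typically finish by re-arming the queue so the next
 * interrupt can fire, roughly:
 *
 *	static void example_poll_done(struct bnxt *bp,
 *				      struct bnxt_cp_ring_info *cpr)
 *	{
 *		// completions consumed; re-enable interrupts for this ring
 *		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
 *	}
 *
 * The field names (cp_db, cp_raw_cons) are assumed from the driver's own
 * structures; the real poll logic lives elsewhere in this file.
 */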

const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
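
/* Editorial note (not upstream code): bnxt_lhint_arr is indexed by the
 * packet length divided by 512 (length >>= 9 in bnxt_start_xmit()).  For
 * example, a 1514-byte frame gives index 2 and therefore
 * TX_BD_FLAGS_LHINT_1024_TO_2047; anything 2 KB or larger saturates at
 * TX_BD_FLAGS_LHINT_2048_AND_LARGER.  The hint lets the NIC size its
 * internal prefetch for the packet.
 */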

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                             u16 prod)
{
        bnxt_db_write(bp, &txr->tx_db, prod);
        txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
                                          struct bnxt_tx_ring_info *txr,
                                          struct netdev_queue *txq)
{
        netif_tx_stop_queue(txq);

        /* netif_tx_stop_queue() must be done before checking
         * tx index in bnxt_tx_avail() below, because in
         * bnxt_tx_int(), we update tx index before checking for
         * netif_tx_queue_stopped().
         */
        smp_mb();
        if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
                netif_tx_wake_queue(txq);
                return false;
        }

        return true;
}
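
/* Editorial note (not upstream code): the smp_mb() above pairs with the
 * smp_mb() in bnxt_tx_int().  The lost-wakeup interleaving it prevents
 * looks roughly like this:
 *
 *	xmit CPU			completion CPU
 *	--------			--------------
 *	netif_tx_stop_queue()
 *					txr->tx_cons = cons;  (room now free)
 *					if (!netif_tx_queue_stopped()) ...
 *	if (bnxt_tx_avail() <= thresh)
 *		return NETDEV_TX_BUSY;
 *
 * Without full barriers on both sides, each CPU can miss the other's
 * update and the queue stays stopped forever.
 */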

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                atomic_long_inc(&dev->tx_dropped);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                /* We must have raced with NAPI cleanup */
                if (net_ratelimit() && txr->kick_pending)
                        netif_warn(bp, tx_err, dev,
                                   "bnxt: ring busy w/ flush pending!\n");
                if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
                        return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 802.1Q and 802.1ad VLAN offloads;
                 * the QinQ1, QinQ2 and QinQ3 VLAN headers are deprecated.
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
                        if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
                                            &ptp->tx_hdr_off)) {
                                if (vlan_tag_flags)
                                        ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
                                atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }
        }

        if (unlikely(skb->no_fcs))
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

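        /* Editorial note (not upstream code): the block below implements
         * "TX push".  When the ring is completely empty and the packet is
         * short (no more than bp->tx_push_thresh bytes, derived from
         * BNXT_TX_PUSH_THRESH), the BDs and the packet bytes are written
         * straight through the doorbell BAR with __iowrite64_copy()/
         * __iowrite32_copy() instead of letting the NIC DMA them from host
         * memory, trading a few CPU stores for one less PCIe round trip.
         * push_len below is the number of 64-bit words to copy, rounded up.
         */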
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
            !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad))
                        /* SKB already freed. */
                        goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                goto tx_free;

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        skb_tx_timestamp(skb);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_txr_db_kick(bp, txr, prod);
        else
                txr->kick_pending = 1;

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_txr_db_kick(bp, txr, prod);

                bnxt_txr_netif_try_stop_queue(bp, txr, txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        if (BNXT_TX_PTP_IS_SET(lflags))
                atomic_inc(&bp->ptp_cfg->tx_avail);

        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), DMA_TO_DEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);
        }

tx_free:
        dev_kfree_skb_any(skb);
tx_kick_pending:
        if (txr->kick_pending)
                bnxt_txr_db_kick(bp, txr, txr->tx_prod);
        txr->tx_buf_ring[txr->tx_prod].skb = NULL;
        atomic_long_inc(&dev->tx_dropped);
        return NETDEV_TX_OK;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                bool compl_deferred = false;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), DMA_TO_DEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                DMA_TO_DEVICE);
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                                if (!bnxt_get_tx_ts_p5(bp, skb))
                                        compl_deferred = true;
                                else
                                        atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                if (!compl_deferred)
                        dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
            READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
                netif_tx_wake_queue(txq);
}
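
/* Editorial note (not upstream code): netdev_tx_completed_queue() above
 * pairs with the netdev_tx_sent_queue() calls in bnxt_start_xmit().  This
 * pair feeds byte queue limits (BQL), which lets the stack keep just
 * enough bytes in flight per TX queue to fill the link without building
 * up excess latency in the ring.
 */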

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}
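
/* Editorial note (not upstream code): the driver has two RX buffer
 * schemes.  In page mode (BNXT_RX_PAGE_MODE, used with XDP) each BD points
 * at a page_pool page mapped above; otherwise each BD points at a
 * kmalloc()ed buffer that can later be turned into an skb with build_skb()
 * without copying.  bnxt_alloc_rx_data() below picks between the two.
 */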

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

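/* Editorial note (not upstream code): aggregation (agg) buffers hold the
 * non-header portion of jumbo and TPA packets.  When the system page size
 * is larger than BNXT_RX_PAGE_SIZE, bnxt_alloc_rx_page() below carves one
 * page into multiple BNXT_RX_PAGE_SIZE chunks, taking an extra page
 * reference (get_page()) for each chunk while the page is still shared
 * through rxr->rx_page.
 */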
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
        page_pool_release_page(rxr->page_pool, page);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}
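
/* Editorial note (not upstream code): bnxt_rx_page_skb() above performs a
 * header split.  The whole page is attached as frag 0, then the first
 * "payload" bytes of headers (per the hardware hint, or eth_get_headlen()
 * when the hint is absent) are copied into the skb's linear area and
 * trimmed from the frag, so the protocol stack can parse headers without
 * touching the page.
 */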

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}
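
/* Editorial note (not upstream code): bnxt_copy_skb() implements the usual
 * "copybreak" optimization.  Frames no longer than bp->rx_copy_thresh
 * (cf. BNXT_RX_COPY_THRESH, 256 bytes) are memcpy()ed into a fresh small
 * skb so the original RX buffer can be reposted in place; for short frames
 * the copy is cheaper than remapping and refilling a full-size buffer.
 */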

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}
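
/* Editorial note (not upstream code): on P5 chips the hardware hands out
 * TPA aggregation IDs from a space larger than the driver's rx_tpa array,
 * so bnxt_alloc_agg_idx() above maps each hardware agg_id to a free
 * software index via a bitmap; bnxt_lookup_agg_idx() translates on later
 * completions and bnxt_free_agg_idx() releases the slot when the
 * aggregation ends.
 */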

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct bnxt_tpa_info *tpa_info;
        u16 cons, prod, agg_id;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_START_AGG_ID_P5(tpa_start);
                agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
        } else {
                agg_id = TPA_START_AGG_ID(tpa_start);
        }
        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons ||
                     TPA_START_ERROR(tpa_start))) {
                netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
                            cons, rxr->rx_next_cons,
                            TPA_START_ERROR_CODE(tpa_start1));
                bnxt_sched_reset(bp, rxr);
                return;
        }
        /* Store cfa_code in tpa_info to use in tpa_end
         * completion processing.
         */
        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
        tpa_info->agg_count = 0;

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}
1369
1370 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1371 {
1372         if (agg_bufs)
1373                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1374 }
1375
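/* For a GRO-coalesced tunnel packet, mark the skb with the matching UDP
 * tunnel GSO type.  A nonzero UDP checksum in the outer header means the
 * stack must regenerate it when segmenting, hence
 * SKB_GSO_UDP_TUNNEL_CSUM; otherwise plain SKB_GSO_UDP_TUNNEL suffices.
 */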
#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
        struct udphdr *uh = NULL;

        if (ip_proto == htons(ETH_P_IP)) {
                struct iphdr *iph = (struct iphdr *)skb->data;

                if (iph->protocol == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                if (iph->nexthdr == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        }
        if (uh) {
                if (uh->check)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                else
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
        }
}
#endif

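/* GRO fixup for 5731x chips.  The hardware supplies the inner and outer
 * header offsets in tpa_info->hdr_info; this variant must also seed the
 * TCP pseudo-header checksum that tcp_gro_complete() expects, e.g. for
 * IPv4:
 *
 *      th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
 */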
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
                 * correct protocol ID, it must be a loopback packet where
                 * the offsets are off by 4.
                 */
                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
                        loopback = true;
        }
        if (loopback) {
                /* internal loopback packet, subtract 4 from all offsets */
                inner_ip_off -= 4;
                inner_mac_off -= 4;
                outer_ip_off -= 4;
        }

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
                struct ipv6hdr *iph = ipv6_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                struct iphdr *iph = ip_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        }

        if (inner_mac_off) { /* tunnel */
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                bnxt_gro_tunnel(skb, proto);
        }
#endif
        return skb;
}

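/* GRO fixup for 5750x (P5) chips.  Same header-offset bookkeeping as the
 * 5731x variant, but no th->check fixup is done here; presumably the P5
 * completion leaves the TCP checksum in the state tcp_gro_complete()
 * needs.
 */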
static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        int iphdr_len, nw_off;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
                     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
        skb_set_transport_header(skb, nw_off + iphdr_len);

        if (inner_mac_off) { /* tunnel */
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                bnxt_gro_tunnel(skb, proto);
        }
#endif
        return skb;
}

#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off, tcp_opt_len = 0;

        if (tcp_ts)
                tcp_opt_len = 12;

        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
                struct iphdr *iph;

                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ip_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
                struct ipv6hdr *iph;

                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ipv6_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        if (nw_off) /* tunnel */
                bnxt_gro_tunnel(skb, skb->protocol);
#endif
        return skb;
}

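/* Finish GRO processing for an aggregated packet: fill in the segment
 * count and gso_size from the TPA_END completion, call the chip-specific
 * bp->gro_func() to fix up the headers, then hand the skb to
 * tcp_gro_complete().  Single-segment aggregations need none of this.
 */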
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
                                           struct bnxt_tpa_info *tpa_info,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        int payload_off;
        u16 segs;

        segs = TPA_END_TPA_SEGS(tpa_end);
        if (segs == 1)
                return skb;

        NAPI_GRO_CB(skb)->count = segs;
        skb_shinfo(skb)->gso_size =
                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
        else
                payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
        if (likely(skb))
                tcp_gro_complete(skb);
#endif
        return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
        struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

        /* if vf-rep dev is NULL, the packet must belong to the PF */
        return dev ? dev : bp->dev;
}

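/* Handle the TPA_END completion that closes out an aggregation started by
 * bnxt_tpa_start().  The packet is built from the buffer parked in
 * tpa_info (copied if short enough, otherwise turned into an skb with
 * build_skb()) plus any aggregation buffers, and a replacement buffer is
 * allocated so the TPA slot can be reused.  Returns NULL after recycling
 * the aggregation buffers on error, or an ERR_PTR if the completion ring
 * does not yet hold all the expected entries.
 */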
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                                           struct bnxt_cp_ring_info *cpr,
                                           u32 *raw_cons,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           u8 *event)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u8 *data_ptr, agg_bufs;
        unsigned int len;
        struct bnxt_tpa_info *tpa_info;
        dma_addr_t mapping;
        struct sk_buff *skb;
        u16 idx = 0, agg_id;
        void *data;
        bool gro;

        if (unlikely(bnapi->in_reset)) {
                int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

                if (rc < 0)
                        return ERR_PTR(-EBUSY);
                return NULL;
        }

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_END_AGG_ID_P5(tpa_end);
                agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
                agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
                tpa_info = &rxr->rx_tpa[agg_id];
                if (unlikely(agg_bufs != tpa_info->agg_count)) {
                        netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
                                    agg_bufs, tpa_info->agg_count);
                        agg_bufs = tpa_info->agg_count;
                }
                tpa_info->agg_count = 0;
                *event |= BNXT_AGG_EVENT;
                bnxt_free_agg_idx(rxr, agg_id);
                idx = agg_id;
                gro = !!(bp->flags & BNXT_FLAG_GRO);
        } else {
                agg_id = TPA_END_AGG_ID(tpa_end);
                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
                tpa_info = &rxr->rx_tpa[agg_id];
                idx = RING_CMP(*raw_cons);
                if (agg_bufs) {
                        if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
                                return ERR_PTR(-EBUSY);

                        *event |= BNXT_AGG_EVENT;
                        idx = NEXT_CMP(idx);
                }
                gro = !!TPA_END_GRO(tpa_end);
        }
        data = tpa_info->data;
        data_ptr = tpa_info->data_ptr;
        prefetch(data_ptr);
        len = tpa_info->len;
        mapping = tpa_info->mapping;

        if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
                bnxt_abort_tpa(cpr, idx, agg_bufs);
                if (agg_bufs > MAX_SKB_FRAGS)
                        netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
                                    agg_bufs, (int)MAX_SKB_FRAGS);
                return NULL;
        }

        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(cpr, idx, agg_bufs);
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
        } else {
                u8 *new_data;
                dma_addr_t new_mapping;

                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
                if (!new_data) {
                        bnxt_abort_tpa(cpr, idx, agg_bufs);
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }

                tpa_info->data = new_data;
                tpa_info->data_ptr = new_data + bp->rx_offset;
                tpa_info->mapping = new_mapping;

                skb = build_skb(data, 0);
                dma_unmap_single_attrs(&bp->pdev->dev, mapping,
                                       bp->rx_buf_use_size, bp->rx_dir,
                                       DMA_ATTR_WEAK_ORDERING);

                if (!skb) {
                        kfree(data);
                        bnxt_abort_tpa(cpr, idx, agg_bufs);
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
                skb_reserve(skb, bp->rx_offset);
                skb_put(skb, len);
        }

        if (agg_bufs) {
                skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
                if (!skb) {
                        /* Page reuse already handled by bnxt_rx_pages(). */
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
        }

        skb->protocol =
                eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
                __be16 vlan_proto = htons(tpa_info->metadata >>
                                          RX_CMP_FLAGS2_METADATA_TPID_SFT);
                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

                if (eth_type_vlan(vlan_proto)) {
                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
                } else {
                        dev_kfree_skb(skb);
                        return NULL;
                }
        }

        skb_checksum_none_assert(skb);
        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->csum_level =
                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
        }

        if (gro)
                skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

        return skb;
}

static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                         struct rx_agg_cmp *rx_agg)
{
        u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
        struct bnxt_tpa_info *tpa_info;

        agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
        tpa_info = &rxr->rx_tpa[agg_id];
        BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
        tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
                             struct sk_buff *skb)
{
        if (skb->dev != bp->dev) {
                /* this packet belongs to a vf-rep */
                bnxt_vf_rep_rx(bp, skb);
                return;
        }
        skb_record_rx_queue(skb, bnapi->index);
        napi_gro_receive(&bnapi->napi, skb);
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                       u32 *raw_cons, u8 *event)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        struct net_device *dev = bp->dev;
        struct rx_cmp *rxcmp;
        struct rx_cmp_ext *rxcmp1;
        u32 tmp_raw_cons = *raw_cons;
        u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
        struct bnxt_sw_rx_bd *rx_buf;
        unsigned int len;
        u8 *data_ptr, agg_bufs, cmp_type;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        u32 flags, misc;
        void *data;
        int rc = 0;

        rxcmp = (struct rx_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
                bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
                goto next_rx_no_prod_no_len;
        }

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(tmp_raw_cons);
        rxcmp1 = (struct rx_cmp_ext *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;

        /* The valid test of the entry must be done first before
         * reading any further.
         */
        dma_rmb();
        prod = rxr->rx_prod;

        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
                bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
                               (struct rx_tpa_start_cmp_ext *)rxcmp1);

                *event |= BNXT_RX_EVENT;
                goto next_rx_no_prod_no_len;

        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
                                   (struct rx_tpa_end_cmp *)rxcmp,
                                   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

                if (IS_ERR(skb))
                        return -EBUSY;

                rc = -ENOMEM;
                if (likely(skb)) {
                        bnxt_deliver_skb(bp, bnapi, skb);
                        rc = 1;
                }
                *event |= BNXT_RX_EVENT;
                goto next_rx_no_prod_no_len;
        }

        cons = rxcmp->rx_cmp_opaque;
        if (unlikely(cons != rxr->rx_next_cons)) {
                int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);

                /* 0xffff is forced error, don't print it */
                if (rxr->rx_next_cons != 0xffff)
                        netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
                                    cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                if (rc1)
                        return rc1;
                goto next_rx_no_prod_no_len;
        }
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
        data_ptr = rx_buf->data_ptr;
        prefetch(data_ptr);

        misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
        agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;

                cp_cons = NEXT_CMP(cp_cons);
                *event |= BNXT_AGG_EVENT;
        }
        *event |= BNXT_RX_EVENT;

        rx_buf->data = NULL;
        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
                u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

                bnxt_reuse_rx_data(rxr, cons, data);
                if (agg_bufs)
                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
                                               false);

                rc = -EIO;
                if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
                        bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
                        if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
                            !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
                                netdev_warn_once(bp->dev, "RX buffer error %x\n",
                                                 rx_err);
                                bnxt_sched_reset(bp, rxr);
                        }
                }
                goto next_rx_no_len;
        }

        flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
        len = flags >> RX_CMP_LEN_SHIFT;
        dma_addr = rx_buf->mapping;

        if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
                rc = 1;
                goto next_rx;
        }

        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
                bnxt_reuse_rx_data(rxr, cons, data);
                if (!skb) {
                        if (agg_bufs)
                                bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
                                                       agg_bufs, false);
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        rc = -ENOMEM;
                        goto next_rx;
                }
        } else {
                u32 payload;

                if (rx_buf->data_ptr == data_ptr)
                        payload = misc & RX_CMP_PAYLOAD_OFFSET;
                else
                        payload = 0;
                skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
                                      payload | len);
                if (!skb) {
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        rc = -ENOMEM;
                        goto next_rx;
                }
        }

        if (agg_bufs) {
                skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
                if (!skb) {
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        rc = -ENOMEM;
                        goto next_rx;
                }
        }

        if (RX_CMP_HASH_VALID(rxcmp)) {
                u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
                enum pkt_hash_types type = PKT_HASH_TYPE_L4;

                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type != 1 && hash_type != 3)
                        type = PKT_HASH_TYPE_L3;
                skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
        }

        cfa_code = RX_CMP_CFA_CODE(rxcmp1);
        skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

        if ((rxcmp1->rx_cmp_flags2 &
             cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
                u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
                __be16 vlan_proto = htons(meta_data >>
                                          RX_CMP_FLAGS2_METADATA_TPID_SFT);

                if (eth_type_vlan(vlan_proto)) {
                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
                } else {
                        dev_kfree_skb(skb);
                        goto next_rx;
                }
        }

        skb_checksum_none_assert(skb);
        if (RX_CMP_L4_CS_OK(rxcmp1)) {
                if (dev->features & NETIF_F_RXCSUM) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
                }
        } else {
                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
                        if (dev->features & NETIF_F_RXCSUM)
                                bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
                }
        }

        if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
                     RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
                if (bp->flags & BNXT_FLAG_CHIP_P5) {
                        u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
                        u64 ns, ts;

                        if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
                                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

                                spin_lock_bh(&ptp->ptp_lock);
                                ns = timecounter_cyc2time(&ptp->tc, ts);
                                spin_unlock_bh(&ptp->ptp_lock);
                                memset(skb_hwtstamps(skb), 0,
                                       sizeof(*skb_hwtstamps(skb)));
                                skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
                        }
                }
        }
        bnxt_deliver_skb(bp, bnapi, skb);
        rc = 1;

next_rx:
        cpr->rx_packets += 1;
        cpr->rx_bytes += len;

next_rx_no_len:
        rxr->rx_prod = NEXT_RX(prod);
        rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod_no_len:
        *raw_cons = tmp_raw_cons;

        return rc;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
                                 struct bnxt_cp_ring_info *cpr,
                                 u32 *raw_cons, u8 *event)
{
        u32 tmp_raw_cons = *raw_cons;
        struct rx_cmp_ext *rxcmp1;
        struct rx_cmp *rxcmp;
        u16 cp_cons;
        u8 cmp_type;
        int rc;

        cp_cons = RING_CMP(tmp_raw_cons);
        rxcmp = (struct rx_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(tmp_raw_cons);
        rxcmp1 = (struct rx_cmp_ext *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;

        /* The valid test of the entry must be done first before
         * reading any further.
         */
        dma_rmb();
        cmp_type = RX_CMP_TYPE(rxcmp);
        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                rxcmp1->rx_cmp_cfa_code_errors_v2 |=
                        cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp_ext *tpa_end1;

                tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
                tpa_end1->rx_tpa_end_cmp_errors_v2 |=
                        cpu_to_le32(RX_TPA_END_CMP_ERRORS);
        }
        rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
        if (rc && rc != -EBUSY)
                cpr->sw_stats.rx.rx_netpoll_discards += 1;
        return rc;
}

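/* Read one firmware health register.  The register descriptor packs the
 * address space into the low bits and the offset into the rest, roughly:
 *
 *      reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);   (CFG/GRC/BAR0/BAR1)
 *      reg_off  = BNXT_FW_HEALTH_REG_OFF(reg);
 *
 * GRC registers are read through a window previously mapped into BAR0
 * (fw_health->mapped_regs), hence the fallthrough to the BAR0 case.
 */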
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
{
        struct bnxt_fw_health *fw_health = bp->fw_health;
        u32 reg = fw_health->regs[reg_idx];
        u32 reg_type, reg_off, val = 0;

        reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
        reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
        switch (reg_type) {
        case BNXT_FW_HEALTH_REG_TYPE_CFG:
                pci_read_config_dword(bp->pdev, reg_off, &val);
                break;
        case BNXT_FW_HEALTH_REG_TYPE_GRC:
                reg_off = fw_health->mapped_regs[reg_idx];
                fallthrough;
        case BNXT_FW_HEALTH_REG_TYPE_BAR0:
                val = readl(bp->bar0 + reg_off);
                break;
        case BNXT_FW_HEALTH_REG_TYPE_BAR1:
                val = readl(bp->bar1 + reg_off);
                break;
        }
        if (reg_idx == BNXT_FW_RESET_INPROG_REG)
                val &= fw_health->fw_reset_inprog_reg_mask;
        return val;
}

static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
{
        int i;

        for (i = 0; i < bp->rx_nr_rings; i++) {
                u16 grp_idx = bp->rx_ring[i].bnapi->index;
                struct bnxt_ring_grp_info *grp_info;

                grp_info = &bp->grp_info[grp_idx];
                if (grp_info->agg_fw_ring_id == ring_id)
                        return grp_idx;
        }
        return INVALID_HW_RING_ID;
}

static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
        switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
                netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
                           BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
                break;
        default:
                netdev_err(bp->dev, "FW reported unknown error type\n");
                break;
        }
}

#define BNXT_GET_EVENT_PORT(data)       \
        ((data) &                       \
         ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

#define BNXT_EVENT_RING_TYPE(data2)     \
        ((data2) &                      \
         ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)

#define BNXT_EVENT_RING_TYPE_RX(data2)  \
        (BNXT_EVENT_RING_TYPE(data2) == \
         ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)

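/* Dispatch one firmware async event completion.  Most cases just set a
 * bit in bp->sp_event and let bnxt_queue_sp_work() schedule the slow
 * path; events needing no workqueue follow-up jump straight to
 * async_event_process_exit, which still forwards the completion to any
 * ULPs via bnxt_ulp_async_events().
 */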
static int bnxt_async_event_process(struct bnxt *bp,
                                    struct hwrm_async_event_cmpl *cmpl)
{
        u16 event_id = le16_to_cpu(cmpl->event_id);
        u32 data1 = le32_to_cpu(cmpl->event_data1);
        u32 data2 = le32_to_cpu(cmpl->event_data2);

        /* TODO CHIMP_FW: Define event id's for link change, error etc */
        switch (event_id) {
        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
                struct bnxt_link_info *link_info = &bp->link_info;

                if (BNXT_VF(bp))
                        goto async_event_process_exit;

                /* print unsupported speed warning in forced speed mode only */
                if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
                    (data1 & 0x20000)) {
                        u16 fw_speed = link_info->force_link_speed;
                        u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

                        if (speed != SPEED_UNKNOWN)
                                netdev_warn(bp->dev, "Link speed %d no longer supported\n",
                                            speed);
                }
                set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
        }
                fallthrough;
        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
                set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
                fallthrough;
        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
                break;
        case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
                set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
                break;
        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
                u16 port_id = BNXT_GET_EVENT_PORT(data1);

                if (BNXT_VF(bp))
                        break;

                if (bp->pf.port_id != port_id)
                        break;

                set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
                break;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
                if (BNXT_PF(bp))
                        goto async_event_process_exit;
                set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
                break;
        case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
                char *fatal_str = "non-fatal";

                if (!bp->fw_health)
                        goto async_event_process_exit;

                bp->fw_reset_timestamp = jiffies;
                bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
                if (!bp->fw_reset_min_dsecs)
                        bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
                bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
                if (!bp->fw_reset_max_dsecs)
                        bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
                if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
                        fatal_str = "fatal";
                        set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
                }
                netif_warn(bp, hw, bp->dev,
                           "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
                           fatal_str, data1, data2,
                           bp->fw_reset_min_dsecs * 100,
                           bp->fw_reset_max_dsecs * 100);
                set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
                break;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
                struct bnxt_fw_health *fw_health = bp->fw_health;

                if (!fw_health)
                        goto async_event_process_exit;

                fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
                fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
                if (!fw_health->enabled) {
                        netif_info(bp, drv, bp->dev,
                                   "Error recovery info: error recovery[0]\n");
                        break;
                }
                fw_health->tmr_multiplier =
                        DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
                                     bp->current_interval * 10);
                fw_health->tmr_counter = fw_health->tmr_multiplier;
                fw_health->last_fw_heartbeat =
                        bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
                fw_health->last_fw_reset_cnt =
                        bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
                netif_info(bp, drv, bp->dev,
                           "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
                           fw_health->master, fw_health->last_fw_reset_cnt,
                           bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
                goto async_event_process_exit;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
                netif_notice(bp, hw, bp->dev,
                             "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
                             data1, data2);
                goto async_event_process_exit;
        case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
                struct bnxt_rx_ring_info *rxr;
                u16 grp_idx;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        goto async_event_process_exit;

                netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
                            BNXT_EVENT_RING_TYPE(data2), data1);
                if (!BNXT_EVENT_RING_TYPE_RX(data2))
                        goto async_event_process_exit;

                grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
                if (grp_idx == INVALID_HW_RING_ID) {
                        netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
                                    data1);
                        goto async_event_process_exit;
                }
                rxr = bp->bnapi[grp_idx]->rx_ring;
                bnxt_sched_reset(bp, rxr);
                goto async_event_process_exit;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
                struct bnxt_fw_health *fw_health = bp->fw_health;

                netif_notice(bp, hw, bp->dev,
                             "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
                             data1, data2);
                if (fw_health) {
                        fw_health->echo_req_data1 = data1;
                        fw_health->echo_req_data2 = data2;
                        set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
                        break;
                }
                goto async_event_process_exit;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
                bnxt_ptp_pps_event(bp, data1, data2);
                goto async_event_process_exit;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
                bnxt_event_error_report(bp, data1, data2);
                goto async_event_process_exit;
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
                u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;

                hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
                goto async_event_process_exit;
        }
        default:
                goto async_event_process_exit;
        }
        bnxt_queue_sp_work(bp);
async_event_process_exit:
        bnxt_ulp_async_events(bp, cmpl);
        return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
        u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
        struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
        struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
                                (struct hwrm_fwd_req_cmpl *)txcmp;

        switch (cmpl_type) {
        case CMPL_BASE_TYPE_HWRM_DONE:
                seq_id = le16_to_cpu(h_cmpl->sequence_id);
                hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
                break;

        case CMPL_BASE_TYPE_HWRM_FWD_REQ:
                vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

                if ((vf_id < bp->pf.first_vf_id) ||
                    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
                        netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
                                   vf_id);
                        return -EINVAL;
                }

                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
                set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
                break;

        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
                bnxt_async_event_process(bp,
                                         (struct hwrm_async_event_cmpl *)txcmp);
                break;

        default:
                break;
        }

        return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
        struct bnxt_napi *bnapi = dev_instance;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        u32 cons = RING_CMP(cpr->cp_raw_cons);

        cpr->event_ctr++;
        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
        napi_schedule(&bnapi->napi);
        return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        u32 raw_cons = cpr->cp_raw_cons;
        u16 cons = RING_CMP(raw_cons);
        struct tx_cmp *txcmp;

        txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

        return TX_CMP_VALID(txcmp, raw_cons);
}

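/* Legacy INTx interrupt handler.  Unlike bnxt_msix(), the interrupt line
 * may be shared, so when no work is pending we read the CAG interrupt
 * status register to filter out interrupts raised for other devices on
 * the line before scheduling NAPI.
 */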
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
        struct bnxt_napi *bnapi = dev_instance;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        u32 cons = RING_CMP(cpr->cp_raw_cons);
        u32 int_status;

        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

        if (!bnxt_has_work(bp, cpr)) {
                int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
                /* return if erroneous interrupt */
                if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
                        return IRQ_NONE;
        }

        /* disable ring IRQ */
        BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);
        return IRQ_HANDLED;
}

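/* Core completion-ring poll loop shared by all NAPI variants.  TX and RX
 * completions arrive interleaved on one ring; TX completions are only
 * counted here (the actual cleanup runs later in __bnxt_poll_work_done()
 * via bnapi->tx_int()), while RX completions are processed immediately
 * and counted against the NAPI budget.
 */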
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                            int budget)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        u32 raw_cons = cpr->cp_raw_cons;
        u32 cons;
        int tx_pkts = 0;
        int rx_pkts = 0;
        u8 event = 0;
        struct tx_cmp *txcmp;

        cpr->has_more_work = 0;
        cpr->had_work_done = 1;
        while (1) {
                int rc;

                cons = RING_CMP(raw_cons);
                txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;

                /* The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
                        if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
                                rx_pkts = budget;
                                raw_cons = NEXT_RAW_CMP(raw_cons);
                                if (budget)
                                        cpr->has_more_work = 1;
                                break;
                        }
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        if (likely(budget))
                                rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
                        else
                                rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
                                                           &event);
                        if (likely(rc >= 0))
                                rx_pkts += rc;
                        /* Increment rx_pkts when rc is -ENOMEM to count towards
                         * the NAPI budget.  Otherwise, we may potentially loop
                         * here forever if we consistently cannot allocate
                         * buffers.
                         */
                        else if (rc == -ENOMEM && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
                } else if (unlikely((TX_CMP_TYPE(txcmp) ==
                                     CMPL_BASE_TYPE_HWRM_DONE) ||
                                    (TX_CMP_TYPE(txcmp) ==
                                     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
                                    (TX_CMP_TYPE(txcmp) ==
                                     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
                        bnxt_hwrm_handler(bp, txcmp);
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);

                if (rx_pkts && rx_pkts == budget) {
                        cpr->has_more_work = 1;
                        break;
                }
        }

        if (event & BNXT_REDIRECT_EVENT)
                xdp_do_flush_map();

        if (event & BNXT_TX_EVENT) {
                struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
                u16 prod = txr->tx_prod;

                /* Sync BD data before updating doorbell */
                wmb();

                bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
        }

        cpr->cp_raw_cons = raw_cons;
        bnapi->tx_pkts += tx_pkts;
        bnapi->events |= event;
        return rx_pkts;
}

static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
        if (bnapi->tx_pkts) {
                bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
                bnapi->tx_pkts = 0;
        }

        if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

                if (bnapi->events & BNXT_AGG_EVENT)
                        bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
                bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
        }
        bnapi->events = 0;
}

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                          int budget)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        int rx_pkts;

        rx_pkts = __bnxt_poll_work(bp, cpr, budget);

        /* ACK completion ring before freeing tx ring and producing new
         * buffers in rx/agg rings to prevent overflowing the completion
         * ring.
         */
        bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);

        __bnxt_poll_work_done(bp, bnapi);
        return rx_pkts;
}

static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        struct tx_cmp *txcmp;
        struct rx_cmp_ext *rxcmp1;
        u32 cp_cons, tmp_raw_cons;
        u32 raw_cons = cpr->cp_raw_cons;
        u32 rx_pkts = 0;
        u8 event = 0;

        while (1) {
                int rc;

                cp_cons = RING_CMP(raw_cons);
                txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;

                /* The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();
                if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
                        cp_cons = RING_CMP(tmp_raw_cons);
                        rxcmp1 = (struct rx_cmp_ext *)
                          &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

                        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                                break;

                        /* force an error to recycle the buffer */
                        rxcmp1->rx_cmp_cfa_code_errors_v2 |=
                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

                        rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
                        if (likely(rc == -EIO) && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
                } else if (unlikely(TX_CMP_TYPE(txcmp) ==
                                    CMPL_BASE_TYPE_HWRM_DONE)) {
                        bnxt_hwrm_handler(bp, txcmp);
                } else {
                        netdev_err(bp->dev,
                                   "Invalid completion received on special ring\n");
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);

                if (rx_pkts == budget)
                        break;
        }

        cpr->cp_raw_cons = raw_cons;
        BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
        bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);

        if (event & BNXT_AGG_EVENT)
                bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

        if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
                napi_complete_done(napi, rx_pkts);
                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
        }
        return rx_pkts;
}

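/* NAPI poll handler for the legacy (non-P5) completion ring layout: keep
 * polling the ring until either the budget is exhausted or no more valid
 * completions are pending, then re-arm the doorbell.  When dynamic
 * interrupt moderation is enabled, feed the accumulated counters to
 * net_dim() so it can retune the coalescing parameters.
 */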
static int bnxt_poll(struct napi_struct *napi, int budget)
{
        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        int work_done = 0;

        if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
                napi_complete(napi);
                return 0;
        }
        while (1) {
                work_done += bnxt_poll_work(bp, cpr, budget - work_done);

                if (work_done >= budget) {
                        if (!budget)
                                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
                        break;
                }

                if (!bnxt_has_work(bp, cpr)) {
                        if (napi_complete_done(napi, work_done))
                                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
                        break;
                }
        }
        if (bp->flags & BNXT_FLAG_DIM) {
                struct dim_sample dim_sample = {};

                dim_update_sample(cpr->event_ctr,
                                  cpr->rx_packets,
                                  cpr->rx_bytes,
                                  &dim_sample);
                net_dim(&cpr->dim, dim_sample);
        }
        return work_done;
}

static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        int i, work_done = 0;

        for (i = 0; i < 2; i++) {
                struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];

                if (cpr2) {
                        work_done += __bnxt_poll_work(bp, cpr2,
                                                      budget - work_done);
                        cpr->has_more_work |= cpr2->has_more_work;
                }
        }
        return work_done;
}

static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
                                 u64 dbr_type)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        int i;

        for (i = 0; i < 2; i++) {
                struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
                struct bnxt_db_info *db;

                if (cpr2 && cpr2->had_work_done) {
                        db = &cpr2->cp_db;
                        writeq(db->db_key64 | dbr_type |
                               RING_CMP(cpr2->cp_raw_cons), db->doorbell);
                        cpr2->had_work_done = 0;
                }
        }
        __bnxt_poll_work_done(bp, bnapi);
}

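/* NAPI poll handler for P5 chips, where the MSI-X vector points at a
 * notification queue (NQ) rather than at the completion rings themselves.
 * Each NQE names a CQ in cpr->cp_ring_arr[] to service, so the loop
 * drains the NQ and calls __bnxt_poll_work() per referenced CQ, arming
 * the CQs and the NQ separately when work is done.
 */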
2649 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2650 {
2651         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2652         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2653         u32 raw_cons = cpr->cp_raw_cons;
2654         struct bnxt *bp = bnapi->bp;
2655         struct nqe_cn *nqcmp;
2656         int work_done = 0;
2657         u32 cons;
2658
2659         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2660                 napi_complete(napi);
2661                 return 0;
2662         }
2663         if (cpr->has_more_work) {
2664                 cpr->has_more_work = 0;
2665                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2666         }
2667         while (1) {
2668                 cons = RING_CMP(raw_cons);
2669                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2670
2671                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2672                         if (cpr->has_more_work)
2673                                 break;
2674
2675                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2676                         cpr->cp_raw_cons = raw_cons;
2677                         if (napi_complete_done(napi, work_done))
2678                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2679                                                   cpr->cp_raw_cons);
2680                         return work_done;
2681                 }
2682
2683                 /* The valid test of the entry must be done first, before
2684                  * reading any further fields.
2685                  */
2686                 dma_rmb();
2687
2688                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2689                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2690                         struct bnxt_cp_ring_info *cpr2;
2691
2692                         cpr2 = cpr->cp_ring_arr[idx];
2693                         work_done += __bnxt_poll_work(bp, cpr2,
2694                                                       budget - work_done);
2695                         cpr->has_more_work |= cpr2->has_more_work;
2696                 } else {
2697                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2698                 }
2699                 raw_cons = NEXT_RAW_CMP(raw_cons);
2700         }
2701         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2702         if (raw_cons != cpr->cp_raw_cons) {
2703                 cpr->cp_raw_cons = raw_cons;
2704                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2705         }
2706         return work_done;
2707 }
2708
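     /* Unmap and free every pending TX buffer on all rings: XDP_REDIRECT
      * frames, push-mode skbs (which have no DMA mapping to undo) and
      * regular skbs with their fragments.  Each queue's BQL state is reset
      * afterwards.
      */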
2709 static void bnxt_free_tx_skbs(struct bnxt *bp)
2710 {
2711         int i, max_idx;
2712         struct pci_dev *pdev = bp->pdev;
2713
2714         if (!bp->tx_ring)
2715                 return;
2716
2717         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2718         for (i = 0; i < bp->tx_nr_rings; i++) {
2719                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2720                 int j;
2721
2722                 for (j = 0; j < max_idx;) {
2723                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2724                         struct sk_buff *skb;
2725                         int k, last;
2726
2727                         if (i < bp->tx_nr_rings_xdp &&
2728                             tx_buf->action == XDP_REDIRECT) {
2729                                 dma_unmap_single(&pdev->dev,
2730                                         dma_unmap_addr(tx_buf, mapping),
2731                                         dma_unmap_len(tx_buf, len),
2732                                         DMA_TO_DEVICE);
2733                                 xdp_return_frame(tx_buf->xdpf);
2734                                 tx_buf->action = 0;
2735                                 tx_buf->xdpf = NULL;
2736                                 j++;
2737                                 continue;
2738                         }
2739
2740                         skb = tx_buf->skb;
2741                         if (!skb) {
2742                                 j++;
2743                                 continue;
2744                         }
2745
2746                         tx_buf->skb = NULL;
2747
2748                         if (tx_buf->is_push) {
2749                                 dev_kfree_skb(skb);
2750                                 j += 2;
2751                                 continue;
2752                         }
2753
2754                         dma_unmap_single(&pdev->dev,
2755                                          dma_unmap_addr(tx_buf, mapping),
2756                                          skb_headlen(skb),
2757                                          DMA_TO_DEVICE);
2758
2759                         last = tx_buf->nr_frags;
2760                         j += 2;
2761                         for (k = 0; k < last; k++, j++) {
2762                                 int ring_idx = j & bp->tx_ring_mask;
2763                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2764
2765                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2766                                 dma_unmap_page(&pdev->dev,
2767                                                dma_unmap_addr(tx_buf, mapping),
2768                                                skb_frag_size(frag),
2769                                                DMA_TO_DEVICE);
2770                         }
2771                         dev_kfree_skb(skb);
2772                 }
2773                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2774         }
2775 }
2776
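     /* Release all receive buffers of one RX ring: TPA staging buffers, the
      * normal ring buffers (page pool pages in page mode, kmalloc'ed data
      * otherwise), the aggregation ring pages, and any partially used
      * rx_page.  The TPA aggregation-ID bitmap is cleared last.
      */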
2777 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2778 {
2779         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2780         struct pci_dev *pdev = bp->pdev;
2781         struct bnxt_tpa_idx_map *map;
2782         int i, max_idx, max_agg_idx;
2783
2784         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2785         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2786         if (!rxr->rx_tpa)
2787                 goto skip_rx_tpa_free;
2788
2789         for (i = 0; i < bp->max_tpa; i++) {
2790                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2791                 u8 *data = tpa_info->data;
2792
2793                 if (!data)
2794                         continue;
2795
2796                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2797                                        bp->rx_buf_use_size, bp->rx_dir,
2798                                        DMA_ATTR_WEAK_ORDERING);
2799
2800                 tpa_info->data = NULL;
2801
2802                 kfree(data);
2803         }
2804
2805 skip_rx_tpa_free:
2806         for (i = 0; i < max_idx; i++) {
2807                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2808                 dma_addr_t mapping = rx_buf->mapping;
2809                 void *data = rx_buf->data;
2810
2811                 if (!data)
2812                         continue;
2813
2814                 rx_buf->data = NULL;
2815                 if (BNXT_RX_PAGE_MODE(bp)) {
2816                         mapping -= bp->rx_dma_offset;
2817                         dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2818                                              bp->rx_dir,
2819                                              DMA_ATTR_WEAK_ORDERING);
2820                         page_pool_recycle_direct(rxr->page_pool, data);
2821                 } else {
2822                         dma_unmap_single_attrs(&pdev->dev, mapping,
2823                                                bp->rx_buf_use_size, bp->rx_dir,
2824                                                DMA_ATTR_WEAK_ORDERING);
2825                         kfree(data);
2826                 }
2827         }
2828         for (i = 0; i < max_agg_idx; i++) {
2829                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2830                 struct page *page = rx_agg_buf->page;
2831
2832                 if (!page)
2833                         continue;
2834
2835                 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2836                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2837                                      DMA_ATTR_WEAK_ORDERING);
2838
2839                 rx_agg_buf->page = NULL;
2840                 __clear_bit(i, rxr->rx_agg_bmap);
2841
2842                 __free_page(page);
2843         }
2844         if (rxr->rx_page) {
2845                 __free_page(rxr->rx_page);
2846                 rxr->rx_page = NULL;
2847         }
2848         map = rxr->rx_tpa_idx_map;
2849         if (map)
2850                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2851 }
2852
2853 static void bnxt_free_rx_skbs(struct bnxt *bp)
2854 {
2855         int i;
2856
2857         if (!bp->rx_ring)
2858                 return;
2859
2860         for (i = 0; i < bp->rx_nr_rings; i++)
2861                 bnxt_free_one_rx_ring_skbs(bp, i);
2862 }
2863
2864 static void bnxt_free_skbs(struct bnxt *bp)
2865 {
2866         bnxt_free_tx_skbs(bp);
2867         bnxt_free_rx_skbs(bp);
2868 }
2869
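     /* Initialize a block of context memory with the firmware-specified
      * value: either the whole block when no offset is given, or one byte
      * at 'offset' within each 'size'-byte record.
      */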
2870 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2871 {
2872         u8 init_val = mem_init->init_val;
2873         u16 offset = mem_init->offset;
2874         u8 *p2 = p;
2875         int i;
2876
2877         if (!init_val)
2878                 return;
2879         if (offset == BNXT_MEM_INVALID_OFFSET) {
2880                 memset(p, init_val, len);
2881                 return;
2882         }
2883         for (i = 0; i < len; i += mem_init->size)
2884                 *(p2 + i + offset) = init_val;
2885 }
2886
2887 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2888 {
2889         struct pci_dev *pdev = bp->pdev;
2890         int i;
2891
2892         for (i = 0; i < rmem->nr_pages; i++) {
2893                 if (!rmem->pg_arr[i])
2894                         continue;
2895
2896                 dma_free_coherent(&pdev->dev, rmem->page_size,
2897                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2898
2899                 rmem->pg_arr[i] = NULL;
2900         }
2901         if (rmem->pg_tbl) {
2902                 size_t pg_tbl_size = rmem->nr_pages * 8;
2903
2904                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2905                         pg_tbl_size = rmem->page_size;
2906                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2907                                   rmem->pg_tbl, rmem->pg_tbl_map);
2908                 rmem->pg_tbl = NULL;
2909         }
2910         if (rmem->vmem_size && *rmem->vmem) {
2911                 vfree(*rmem->vmem);
2912                 *rmem->vmem = NULL;
2913         }
2914 }
2915
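     /* Allocate the coherent DMA pages described by rmem.  Rings that span
      * multiple pages (or use multiple levels) also get a page table whose
      * entries carry the PTU valid/next-to-last/last bits expected by the
      * hardware.  The optional software ring (vmem) is vzalloc'ed last.
      */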
2916 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2917 {
2918         struct pci_dev *pdev = bp->pdev;
2919         u64 valid_bit = 0;
2920         int i;
2921
2922         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2923                 valid_bit = PTU_PTE_VALID;
2924         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2925                 size_t pg_tbl_size = rmem->nr_pages * 8;
2926
2927                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2928                         pg_tbl_size = rmem->page_size;
2929                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2930                                                   &rmem->pg_tbl_map,
2931                                                   GFP_KERNEL);
2932                 if (!rmem->pg_tbl)
2933                         return -ENOMEM;
2934         }
2935
2936         for (i = 0; i < rmem->nr_pages; i++) {
2937                 u64 extra_bits = valid_bit;
2938
2939                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2940                                                      rmem->page_size,
2941                                                      &rmem->dma_arr[i],
2942                                                      GFP_KERNEL);
2943                 if (!rmem->pg_arr[i])
2944                         return -ENOMEM;
2945
2946                 if (rmem->mem_init)
2947                         bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2948                                           rmem->page_size);
2949                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2950                         if (i == rmem->nr_pages - 2 &&
2951                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2952                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2953                         else if (i == rmem->nr_pages - 1 &&
2954                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2955                                 extra_bits |= PTU_PTE_LAST;
2956                         rmem->pg_tbl[i] =
2957                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2958                 }
2959         }
2960
2961         if (rmem->vmem_size) {
2962                 *rmem->vmem = vzalloc(rmem->vmem_size);
2963                 if (!(*rmem->vmem))
2964                         return -ENOMEM;
2965         }
2966         return 0;
2967 }
2968
2969 static void bnxt_free_tpa_info(struct bnxt *bp)
2970 {
2971         int i;
2972
2973         for (i = 0; i < bp->rx_nr_rings; i++) {
2974                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2975
2976                 kfree(rxr->rx_tpa_idx_map);
2977                 rxr->rx_tpa_idx_map = NULL;
2978                 if (rxr->rx_tpa) {
2979                         kfree(rxr->rx_tpa[0].agg_arr);
2980                         rxr->rx_tpa[0].agg_arr = NULL;
2981                 }
2982                 kfree(rxr->rx_tpa);
2983                 rxr->rx_tpa = NULL;
2984         }
2985 }
2986
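     /* Allocate per-ring TPA state.  On P5 chips the TPA aggregation
      * completions for all bp->max_tpa slots live in one array per ring
      * (MAX_SKB_FRAGS entries per slot), along with a map of in-use
      * aggregation IDs.
      */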
2987 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2988 {
2989         int i, j, total_aggs = 0;
2990
2991         bp->max_tpa = MAX_TPA;
2992         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2993                 if (!bp->max_tpa_v2)
2994                         return 0;
2995                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2996                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2997         }
2998
2999         for (i = 0; i < bp->rx_nr_rings; i++) {
3000                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3001                 struct rx_agg_cmp *agg;
3002
3003                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3004                                       GFP_KERNEL);
3005                 if (!rxr->rx_tpa)
3006                         return -ENOMEM;
3007
3008                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3009                         continue;
3010                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3011                 rxr->rx_tpa[0].agg_arr = agg;
3012                 if (!agg)
3013                         return -ENOMEM;
3014                 for (j = 1; j < bp->max_tpa; j++)
3015                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3016                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3017                                               GFP_KERNEL);
3018                 if (!rxr->rx_tpa_idx_map)
3019                         return -ENOMEM;
3020         }
3021         return 0;
3022 }
3023
3024 static void bnxt_free_rx_rings(struct bnxt *bp)
3025 {
3026         int i;
3027
3028         if (!bp->rx_ring)
3029                 return;
3030
3031         bnxt_free_tpa_info(bp);
3032         for (i = 0; i < bp->rx_nr_rings; i++) {
3033                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3034                 struct bnxt_ring_struct *ring;
3035
3036                 if (rxr->xdp_prog)
3037                         bpf_prog_put(rxr->xdp_prog);
3038
3039                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3040                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3041
3042                 page_pool_destroy(rxr->page_pool);
3043                 rxr->page_pool = NULL;
3044
3045                 kfree(rxr->rx_agg_bmap);
3046                 rxr->rx_agg_bmap = NULL;
3047
3048                 ring = &rxr->rx_ring_struct;
3049                 bnxt_free_ring(bp, &ring->ring_mem);
3050
3051                 ring = &rxr->rx_agg_ring_struct;
3052                 bnxt_free_ring(bp, &ring->ring_mem);
3053         }
3054 }
3055
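     /* Create one page pool per RX ring, sized to the RX ring and bound to
      * the PCI device's NUMA node.
      */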
3056 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3057                                    struct bnxt_rx_ring_info *rxr)
3058 {
3059         struct page_pool_params pp = { 0 };
3060
3061         pp.pool_size = bp->rx_ring_size;
3062         pp.nid = dev_to_node(&bp->pdev->dev);
3063         pp.dev = &bp->pdev->dev;
3064         pp.dma_dir = DMA_BIDIRECTIONAL;
3065
3066         rxr->page_pool = page_pool_create(&pp);
3067         if (IS_ERR(rxr->page_pool)) {
3068                 int err = PTR_ERR(rxr->page_pool);
3069
3070                 rxr->page_pool = NULL;
3071                 return err;
3072         }
3073         return 0;
3074 }
3075
3076 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3077 {
3078         int i, rc = 0, agg_rings = 0;
3079
3080         if (!bp->rx_ring)
3081                 return -ENOMEM;
3082
3083         if (bp->flags & BNXT_FLAG_AGG_RINGS)
3084                 agg_rings = 1;
3085
3086         for (i = 0; i < bp->rx_nr_rings; i++) {
3087                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3088                 struct bnxt_ring_struct *ring;
3089
3090                 ring = &rxr->rx_ring_struct;
3091
3092                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3093                 if (rc)
3094                         return rc;
3095
3096                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3097                 if (rc < 0)
3098                         return rc;
3099
3100                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3101                                                 MEM_TYPE_PAGE_POOL,
3102                                                 rxr->page_pool);
3103                 if (rc) {
3104                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3105                         return rc;
3106                 }
3107
3108                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3109                 if (rc)
3110                         return rc;
3111
3112                 ring->grp_idx = i;
3113                 if (agg_rings) {
3114                         u16 mem_size;
3115
3116                         ring = &rxr->rx_agg_ring_struct;
3117                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3118                         if (rc)
3119                                 return rc;
3120
3121                         ring->grp_idx = i;
3122                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3123                         mem_size = rxr->rx_agg_bmap_size / 8;
3124                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3125                         if (!rxr->rx_agg_bmap)
3126                                 return -ENOMEM;
3127                 }
3128         }
3129         if (bp->flags & BNXT_FLAG_TPA)
3130                 rc = bnxt_alloc_tpa_info(bp);
3131         return rc;
3132 }
3133
3134 static void bnxt_free_tx_rings(struct bnxt *bp)
3135 {
3136         int i;
3137         struct pci_dev *pdev = bp->pdev;
3138
3139         if (!bp->tx_ring)
3140                 return;
3141
3142         for (i = 0; i < bp->tx_nr_rings; i++) {
3143                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3144                 struct bnxt_ring_struct *ring;
3145
3146                 if (txr->tx_push) {
3147                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3148                                           txr->tx_push, txr->tx_push_mapping);
3149                         txr->tx_push = NULL;
3150                 }
3151
3152                 ring = &txr->tx_ring_struct;
3153
3154                 bnxt_free_ring(bp, &ring->ring_mem);
3155         }
3156 }
3157
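     /* Allocate TX descriptor memory and, if TX push is enabled, one
      * coherent buffer per ring used to stage push-mode packets.  Push is
      * disabled when the aligned push BD plus threshold exceeds 256 bytes.
      */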
3158 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3159 {
3160         int i, j, rc;
3161         struct pci_dev *pdev = bp->pdev;
3162
3163         bp->tx_push_size = 0;
3164         if (bp->tx_push_thresh) {
3165                 int push_size;
3166
3167                 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3168                                            bp->tx_push_thresh);
3169
3170                 if (push_size > 256) {
3171                         push_size = 0;
3172                         bp->tx_push_thresh = 0;
3173                 }
3174
3175                 bp->tx_push_size = push_size;
3176         }
3177
3178         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3179                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3180                 struct bnxt_ring_struct *ring;
3181                 u8 qidx;
3182
3183                 ring = &txr->tx_ring_struct;
3184
3185                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3186                 if (rc)
3187                         return rc;
3188
3189                 ring->grp_idx = txr->bnapi->index;
3190                 if (bp->tx_push_size) {
3191                         dma_addr_t mapping;
3192
3193                         /* One pre-allocated DMA buffer to back up the
3194                          * TX push operation.
3195                          */
3196                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3197                                                 bp->tx_push_size,
3198                                                 &txr->tx_push_mapping,
3199                                                 GFP_KERNEL);
3200
3201                         if (!txr->tx_push)
3202                                 return -ENOMEM;
3203
3204                         mapping = txr->tx_push_mapping +
3205                                 sizeof(struct tx_push_bd);
3206                         txr->data_mapping = cpu_to_le64(mapping);
3207                 }
3208                 qidx = bp->tc_to_qidx[j];
3209                 ring->queue_id = bp->q_info[qidx].queue_id;
3210                 if (i < bp->tx_nr_rings_xdp)
3211                         continue;
3212                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3213                         j++;
3214         }
3215         return 0;
3216 }
3217
3218 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3219 {
3220         kfree(cpr->cp_desc_ring);
3221         cpr->cp_desc_ring = NULL;
3222         kfree(cpr->cp_desc_mapping);
3223         cpr->cp_desc_mapping = NULL;
3224 }
3225
3226 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3227 {
3228         cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3229         if (!cpr->cp_desc_ring)
3230                 return -ENOMEM;
3231         cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3232                                        GFP_KERNEL);
3233         if (!cpr->cp_desc_mapping)
3234                 return -ENOMEM;
3235         return 0;
3236 }
3237
3238 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3239 {
3240         int i;
3241
3242         if (!bp->bnapi)
3243                 return;
3244         for (i = 0; i < bp->cp_nr_rings; i++) {
3245                 struct bnxt_napi *bnapi = bp->bnapi[i];
3246
3247                 if (!bnapi)
3248                         continue;
3249                 bnxt_free_cp_arrays(&bnapi->cp_ring);
3250         }
3251 }
3252
3253 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3254 {
3255         int i, n = bp->cp_nr_pages;
3256
3257         for (i = 0; i < bp->cp_nr_rings; i++) {
3258                 struct bnxt_napi *bnapi = bp->bnapi[i];
3259                 int rc;
3260
3261                 if (!bnapi)
3262                         continue;
3263                 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3264                 if (rc)
3265                         return rc;
3266         }
3267         return 0;
3268 }
3269
3270 static void bnxt_free_cp_rings(struct bnxt *bp)
3271 {
3272         int i;
3273
3274         if (!bp->bnapi)
3275                 return;
3276
3277         for (i = 0; i < bp->cp_nr_rings; i++) {
3278                 struct bnxt_napi *bnapi = bp->bnapi[i];
3279                 struct bnxt_cp_ring_info *cpr;
3280                 struct bnxt_ring_struct *ring;
3281                 int j;
3282
3283                 if (!bnapi)
3284                         continue;
3285
3286                 cpr = &bnapi->cp_ring;
3287                 ring = &cpr->cp_ring_struct;
3288
3289                 bnxt_free_ring(bp, &ring->ring_mem);
3290
3291                 for (j = 0; j < 2; j++) {
3292                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3293
3294                         if (cpr2) {
3295                                 ring = &cpr2->cp_ring_struct;
3296                                 bnxt_free_ring(bp, &ring->ring_mem);
3297                                 bnxt_free_cp_arrays(cpr2);
3298                                 kfree(cpr2);
3299                                 cpr->cp_ring_arr[j] = NULL;
3300                         }
3301                 }
3302         }
3303 }
3304
3305 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3306 {
3307         struct bnxt_ring_mem_info *rmem;
3308         struct bnxt_ring_struct *ring;
3309         struct bnxt_cp_ring_info *cpr;
3310         int rc;
3311
3312         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3313         if (!cpr)
3314                 return NULL;
3315
3316         rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3317         if (rc) {
3318                 bnxt_free_cp_arrays(cpr);
3319                 kfree(cpr);
3320                 return NULL;
3321         }
3322         ring = &cpr->cp_ring_struct;
3323         rmem = &ring->ring_mem;
3324         rmem->nr_pages = bp->cp_nr_pages;
3325         rmem->page_size = HW_CMPD_RING_SIZE;
3326         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3327         rmem->dma_arr = cpr->cp_desc_mapping;
3328         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3329         rc = bnxt_alloc_ring(bp, rmem);
3330         if (rc) {
3331                 bnxt_free_ring(bp, rmem);
3332                 bnxt_free_cp_arrays(cpr);
3333                 kfree(cpr);
3334                 cpr = NULL;
3335         }
3336         return cpr;
3337 }
3338
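     /* Allocate the completion (NQ on P5) ring for each NAPI instance and
      * set its MSI-X map index, skipping over any vectors reserved for
      * ULPs.  On P5 chips, separate RX and/or TX completion sub-rings are
      * hung off each NQ according to the ring distribution.
      */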
3339 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3340 {
3341         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3342         int i, rc, ulp_base_vec, ulp_msix;
3343
3344         ulp_msix = bnxt_get_ulp_msix_num(bp);
3345         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3346         for (i = 0; i < bp->cp_nr_rings; i++) {
3347                 struct bnxt_napi *bnapi = bp->bnapi[i];
3348                 struct bnxt_cp_ring_info *cpr;
3349                 struct bnxt_ring_struct *ring;
3350
3351                 if (!bnapi)
3352                         continue;
3353
3354                 cpr = &bnapi->cp_ring;
3355                 cpr->bnapi = bnapi;
3356                 ring = &cpr->cp_ring_struct;
3357
3358                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3359                 if (rc)
3360                         return rc;
3361
3362                 if (ulp_msix && i >= ulp_base_vec)
3363                         ring->map_idx = i + ulp_msix;
3364                 else
3365                         ring->map_idx = i;
3366
3367                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3368                         continue;
3369
3370                 if (i < bp->rx_nr_rings) {
3371                         struct bnxt_cp_ring_info *cpr2 =
3372                                 bnxt_alloc_cp_sub_ring(bp);
3373
3374                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3375                         if (!cpr2)
3376                                 return -ENOMEM;
3377                         cpr2->bnapi = bnapi;
3378                 }
3379                 if ((sh && i < bp->tx_nr_rings) ||
3380                     (!sh && i >= bp->rx_nr_rings)) {
3381                         struct bnxt_cp_ring_info *cpr2 =
3382                                 bnxt_alloc_cp_sub_ring(bp);
3383
3384                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3385                         if (!cpr2)
3386                                 return -ENOMEM;
3387                         cpr2->bnapi = bnapi;
3388                 }
3389         }
3390         return 0;
3391 }
3392
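     /* Point each ring's bnxt_ring_mem_info at its descriptor page and DMA
      * address arrays and size the software (vmem) rings.  No memory is
      * allocated here; bnxt_alloc_ring() does that from the
      * bnxt_alloc_*_rings() paths.
      */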
3393 static void bnxt_init_ring_struct(struct bnxt *bp)
3394 {
3395         int i;
3396
3397         for (i = 0; i < bp->cp_nr_rings; i++) {
3398                 struct bnxt_napi *bnapi = bp->bnapi[i];
3399                 struct bnxt_ring_mem_info *rmem;
3400                 struct bnxt_cp_ring_info *cpr;
3401                 struct bnxt_rx_ring_info *rxr;
3402                 struct bnxt_tx_ring_info *txr;
3403                 struct bnxt_ring_struct *ring;
3404
3405                 if (!bnapi)
3406                         continue;
3407
3408                 cpr = &bnapi->cp_ring;
3409                 ring = &cpr->cp_ring_struct;
3410                 rmem = &ring->ring_mem;
3411                 rmem->nr_pages = bp->cp_nr_pages;
3412                 rmem->page_size = HW_CMPD_RING_SIZE;
3413                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3414                 rmem->dma_arr = cpr->cp_desc_mapping;
3415                 rmem->vmem_size = 0;
3416
3417                 rxr = bnapi->rx_ring;
3418                 if (!rxr)
3419                         goto skip_rx;
3420
3421                 ring = &rxr->rx_ring_struct;
3422                 rmem = &ring->ring_mem;
3423                 rmem->nr_pages = bp->rx_nr_pages;
3424                 rmem->page_size = HW_RXBD_RING_SIZE;
3425                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3426                 rmem->dma_arr = rxr->rx_desc_mapping;
3427                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3428                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3429
3430                 ring = &rxr->rx_agg_ring_struct;
3431                 rmem = &ring->ring_mem;
3432                 rmem->nr_pages = bp->rx_agg_nr_pages;
3433                 rmem->page_size = HW_RXBD_RING_SIZE;
3434                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3435                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3436                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3437                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3438
3439 skip_rx:
3440                 txr = bnapi->tx_ring;
3441                 if (!txr)
3442                         continue;
3443
3444                 ring = &txr->tx_ring_struct;
3445                 rmem = &ring->ring_mem;
3446                 rmem->nr_pages = bp->tx_nr_pages;
3447                 rmem->page_size = HW_RXBD_RING_SIZE;
3448                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3449                 rmem->dma_arr = txr->tx_desc_mapping;
3450                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3451                 rmem->vmem = (void **)&txr->tx_buf_ring;
3452         }
3453 }
3454
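     /* Stamp every RX BD in the ring with the given len/flags/type word and
      * a sequential opaque value (the producer index) that RX completions
      * echo back.
      */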
3455 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3456 {
3457         int i;
3458         u32 prod;
3459         struct rx_bd **rx_buf_ring;
3460
3461         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3462         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3463                 int j;
3464                 struct rx_bd *rxbd;
3465
3466                 rxbd = rx_buf_ring[i];
3467                 if (!rxbd)
3468                         continue;
3469
3470                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3471                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3472                         rxbd->rx_bd_opaque = prod;
3473                 }
3474         }
3475 }
3476
3477 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3478 {
3479         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3480         struct net_device *dev = bp->dev;
3481         u32 prod;
3482         int i;
3483
3484         prod = rxr->rx_prod;
3485         for (i = 0; i < bp->rx_ring_size; i++) {
3486                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3487                         netdev_warn(dev, "initialized rx ring %d with only %d/%d skbs\n",
3488                                     ring_nr, i, bp->rx_ring_size);
3489                         break;
3490                 }
3491                 prod = NEXT_RX(prod);
3492         }
3493         rxr->rx_prod = prod;
3494
3495         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3496                 return 0;
3497
3498         prod = rxr->rx_agg_prod;
3499         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3500                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3501                         netdev_warn(dev, "initialized rx agg ring %d with only %d/%d pages\n",
3502                                     ring_nr, i, bp->rx_agg_ring_size);
3503                         break;
3504                 }
3505                 prod = NEXT_RX_AGG(prod);
3506         }
3507         rxr->rx_agg_prod = prod;
3508
3509         if (rxr->rx_tpa) {
3510                 dma_addr_t mapping;
3511                 u8 *data;
3512
3513                 for (i = 0; i < bp->max_tpa; i++) {
3514                         data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3515                         if (!data)
3516                                 return -ENOMEM;
3517
3518                         rxr->rx_tpa[i].data = data;
3519                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3520                         rxr->rx_tpa[i].mapping = mapping;
3521                 }
3522         }
3523         return 0;
3524 }
3525
3526 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3527 {
3528         struct bnxt_rx_ring_info *rxr;
3529         struct bnxt_ring_struct *ring;
3530         u32 type;
3531
3532         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3533                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3534
3535         if (NET_IP_ALIGN == 2)
3536                 type |= RX_BD_FLAGS_SOP;
3537
3538         rxr = &bp->rx_ring[ring_nr];
3539         ring = &rxr->rx_ring_struct;
3540         bnxt_init_rxbd_pages(ring, type);
3541
3542         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3543                 bpf_prog_add(bp->xdp_prog, 1);
3544                 rxr->xdp_prog = bp->xdp_prog;
3545         }
3546         ring->fw_ring_id = INVALID_HW_RING_ID;
3547
3548         ring = &rxr->rx_agg_ring_struct;
3549         ring->fw_ring_id = INVALID_HW_RING_ID;
3550
3551         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3552                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3553                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3554
3555                 bnxt_init_rxbd_pages(ring, type);
3556         }
3557
3558         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3559 }
3560
3561 static void bnxt_init_cp_rings(struct bnxt *bp)
3562 {
3563         int i, j;
3564
3565         for (i = 0; i < bp->cp_nr_rings; i++) {
3566                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3567                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3568
3569                 ring->fw_ring_id = INVALID_HW_RING_ID;
3570                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3571                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3572                 for (j = 0; j < 2; j++) {
3573                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3574
3575                         if (!cpr2)
3576                                 continue;
3577
3578                         ring = &cpr2->cp_ring_struct;
3579                         ring->fw_ring_id = INVALID_HW_RING_ID;
3580                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3581                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3582                 }
3583         }
3584 }
3585
3586 static int bnxt_init_rx_rings(struct bnxt *bp)
3587 {
3588         int i, rc = 0;
3589
3590         if (BNXT_RX_PAGE_MODE(bp)) {
3591                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3592                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3593         } else {
3594                 bp->rx_offset = BNXT_RX_OFFSET;
3595                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3596         }
3597
3598         for (i = 0; i < bp->rx_nr_rings; i++) {
3599                 rc = bnxt_init_one_rx_ring(bp, i);
3600                 if (rc)
3601                         break;
3602         }
3603
3604         return rc;
3605 }
3606
3607 static int bnxt_init_tx_rings(struct bnxt *bp)
3608 {
3609         u16 i;
3610
3611         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3612                                    MAX_SKB_FRAGS + 1);
3613
3614         for (i = 0; i < bp->tx_nr_rings; i++) {
3615                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3616                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3617
3618                 ring->fw_ring_id = INVALID_HW_RING_ID;
3619         }
3620
3621         return 0;
3622 }
3623
3624 static void bnxt_free_ring_grps(struct bnxt *bp)
3625 {
3626         kfree(bp->grp_info);
3627         bp->grp_info = NULL;
3628 }
3629
3630 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3631 {
3632         int i;
3633
3634         if (irq_re_init) {
3635                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3636                                        sizeof(struct bnxt_ring_grp_info),
3637                                        GFP_KERNEL);
3638                 if (!bp->grp_info)
3639                         return -ENOMEM;
3640         }
3641         for (i = 0; i < bp->cp_nr_rings; i++) {
3642                 if (irq_re_init)
3643                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3644                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3645                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3646                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3647                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3648         }
3649         return 0;
3650 }
3651
3652 static void bnxt_free_vnics(struct bnxt *bp)
3653 {
3654         kfree(bp->vnic_info);
3655         bp->vnic_info = NULL;
3656         bp->nr_vnics = 0;
3657 }
3658
3659 static int bnxt_alloc_vnics(struct bnxt *bp)
3660 {
3661         int num_vnics = 1;
3662
3663 #ifdef CONFIG_RFS_ACCEL
3664         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3665                 num_vnics += bp->rx_nr_rings;
3666 #endif
3667
3668         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3669                 num_vnics++;
3670
3671         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3672                                 GFP_KERNEL);
3673         if (!bp->vnic_info)
3674                 return -ENOMEM;
3675
3676         bp->nr_vnics = num_vnics;
3677         return 0;
3678 }
3679
3680 static void bnxt_init_vnics(struct bnxt *bp)
3681 {
3682         int i;
3683
3684         for (i = 0; i < bp->nr_vnics; i++) {
3685                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3686                 int j;
3687
3688                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3689                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3690                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3691
3692                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3693
3694                 if (bp->vnic_info[i].rss_hash_key) {
3695                         if (i == 0)
3696                                 prandom_bytes(vnic->rss_hash_key,
3697                                               HW_HASH_KEY_SIZE);
3698                         else
3699                                 memcpy(vnic->rss_hash_key,
3700                                        bp->vnic_info[0].rss_hash_key,
3701                                        HW_HASH_KEY_SIZE);
3702                 }
3703         }
3704 }
3705
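     /* Return the number of ring pages needed to hold ring_size descriptors
      * at desc_per_pg descriptors per page, rounded up to the next power of
      * two (minimum one page).
      */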
3706 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3707 {
3708         int pages;
3709
3710         pages = ring_size / desc_per_pg;
3711
3712         if (!pages)
3713                 return 1;
3714
3715         pages++;
3716
3717         while (pages & (pages - 1))
3718                 pages++;
3719
3720         return pages;
3721 }
3722
3723 void bnxt_set_tpa_flags(struct bnxt *bp)
3724 {
3725         bp->flags &= ~BNXT_FLAG_TPA;
3726         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3727                 return;
3728         if (bp->dev->features & NETIF_F_LRO)
3729                 bp->flags |= BNXT_FLAG_LRO;
3730         else if (bp->dev->features & NETIF_F_GRO_HW)
3731                 bp->flags |= BNXT_FLAG_GRO;
3732 }
3733
3734 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3735  * be set on entry.
3736  */
3737 void bnxt_set_ring_params(struct bnxt *bp)
3738 {
3739         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3740         u32 agg_factor = 0, agg_ring_size = 0;
3741
3742         /* 8 for CRC and VLAN */
3743         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3744
3745         rx_space = rx_size + NET_SKB_PAD +
3746                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3747
3748         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3749         ring_size = bp->rx_ring_size;
3750         bp->rx_agg_ring_size = 0;
3751         bp->rx_agg_nr_pages = 0;
3752
3753         if (bp->flags & BNXT_FLAG_TPA)
3754                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3755
3756         bp->flags &= ~BNXT_FLAG_JUMBO;
3757         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3758                 u32 jumbo_factor;
3759
3760                 bp->flags |= BNXT_FLAG_JUMBO;
3761                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3762                 if (jumbo_factor > agg_factor)
3763                         agg_factor = jumbo_factor;
3764         }
3765         if (agg_factor) {
3766                 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3767                         ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3768                         netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3769                                     bp->rx_ring_size, ring_size);
3770                         bp->rx_ring_size = ring_size;
3771                 }
3772                 agg_ring_size = ring_size * agg_factor;
3773
3774                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3775                                                         RX_DESC_CNT);
3776                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3777                         u32 tmp = agg_ring_size;
3778
3779                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3780                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3781                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3782                                     tmp, agg_ring_size);
3783                 }
3784                 bp->rx_agg_ring_size = agg_ring_size;
3785                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3786                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3787                 rx_space = rx_size + NET_SKB_PAD +
3788                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3789         }
3790
3791         bp->rx_buf_use_size = rx_size;
3792         bp->rx_buf_size = rx_space;
3793
3794         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3795         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3796
3797         ring_size = bp->tx_ring_size;
3798         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3799         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3800
3801         max_rx_cmpl = bp->rx_ring_size;
3802         /* MAX TPA needs to be added because TPA_START completions are
3803          * immediately recycled, so the TPA completions are not bound by
3804          * the RX ring size.
3805          */
3806         if (bp->flags & BNXT_FLAG_TPA)
3807                 max_rx_cmpl += bp->max_tpa;
3808         /* RX and TPA completions are 32-byte, all others are 16-byte */
3809         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3810         bp->cp_ring_size = ring_size;
3811
3812         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3813         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3814                 bp->cp_nr_pages = MAX_CP_PAGES;
3815                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3816                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3817                             ring_size, bp->cp_ring_size);
3818         }
3819         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3820         bp->cp_ring_mask = bp->cp_bit - 1;
3821 }
3822
3823 /* Changing allocation mode of RX rings.
3824  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3825  */
3826 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3827 {
3828         if (page_mode) {
3829                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3830                         return -EOPNOTSUPP;
3831                 bp->dev->max_mtu =
3832                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3833                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3834                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3835                 bp->rx_dir = DMA_BIDIRECTIONAL;
3836                 bp->rx_skb_func = bnxt_rx_page_skb;
3837                 /* Disable LRO or GRO_HW */
3838                 netdev_update_features(bp->dev);
3839         } else {
3840                 bp->dev->max_mtu = bp->max_mtu;
3841                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3842                 bp->rx_dir = DMA_FROM_DEVICE;
3843                 bp->rx_skb_func = bnxt_rx_skb;
3844         }
3845         return 0;
3846 }
3847
3848 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3849 {
3850         int i;
3851         struct bnxt_vnic_info *vnic;
3852         struct pci_dev *pdev = bp->pdev;
3853
3854         if (!bp->vnic_info)
3855                 return;
3856
3857         for (i = 0; i < bp->nr_vnics; i++) {
3858                 vnic = &bp->vnic_info[i];
3859
3860                 kfree(vnic->fw_grp_ids);
3861                 vnic->fw_grp_ids = NULL;
3862
3863                 kfree(vnic->uc_list);
3864                 vnic->uc_list = NULL;
3865
3866                 if (vnic->mc_list) {
3867                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3868                                           vnic->mc_list, vnic->mc_list_mapping);
3869                         vnic->mc_list = NULL;
3870                 }
3871
3872                 if (vnic->rss_table) {
3873                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3874                                           vnic->rss_table,
3875                                           vnic->rss_table_dma_addr);
3876                         vnic->rss_table = NULL;
3877                 }
3878
3879                 vnic->rss_hash_key = NULL;
3880                 vnic->flags = 0;
3881         }
3882 }
3883
3884 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3885 {
3886         int i, rc = 0, size;
3887         struct bnxt_vnic_info *vnic;
3888         struct pci_dev *pdev = bp->pdev;
3889         int max_rings;
3890
3891         for (i = 0; i < bp->nr_vnics; i++) {
3892                 vnic = &bp->vnic_info[i];
3893
3894                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3895                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3896
3897                         if (mem_size > 0) {
3898                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3899                                 if (!vnic->uc_list) {
3900                                         rc = -ENOMEM;
3901                                         goto out;
3902                                 }
3903                         }
3904                 }
3905
3906                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3907                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3908                         vnic->mc_list =
3909                                 dma_alloc_coherent(&pdev->dev,
3910                                                    vnic->mc_list_size,
3911                                                    &vnic->mc_list_mapping,
3912                                                    GFP_KERNEL);
3913                         if (!vnic->mc_list) {
3914                                 rc = -ENOMEM;
3915                                 goto out;
3916                         }
3917                 }
3918
3919                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3920                         goto vnic_skip_grps;
3921
3922                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3923                         max_rings = bp->rx_nr_rings;
3924                 else
3925                         max_rings = 1;
3926
3927                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3928                 if (!vnic->fw_grp_ids) {
3929                         rc = -ENOMEM;
3930                         goto out;
3931                 }
3932 vnic_skip_grps:
3933                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3934                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3935                         continue;
3936
3937                 /* Allocate rss table and hash key */
3938                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3939                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3940                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3941
3942                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3943                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3944                                                      vnic->rss_table_size,
3945                                                      &vnic->rss_table_dma_addr,
3946                                                      GFP_KERNEL);
3947                 if (!vnic->rss_table) {
3948                         rc = -ENOMEM;
3949                         goto out;
3950                 }
3951
3952                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3953                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3954         }
3955         return 0;
3956
3957 out:
3958         return rc;
3959 }
3960
3961 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3962 {
3963         struct bnxt_hwrm_wait_token *token;
3964
3965         dma_pool_destroy(bp->hwrm_dma_pool);
3966         bp->hwrm_dma_pool = NULL;
3967
3968         rcu_read_lock();
3969         hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
3970                 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
3971         rcu_read_unlock();
3972 }
3973
3974 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3975 {
3976         bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
3977                                             BNXT_HWRM_DMA_SIZE,
3978                                             BNXT_HWRM_DMA_ALIGN, 0);
3979         if (!bp->hwrm_dma_pool)
3980                 return -ENOMEM;
3981
3982         INIT_HLIST_HEAD(&bp->hwrm_pending_list);
3983
3984         return 0;
3985 }
3986
3987 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3988 {
3989         kfree(stats->hw_masks);
3990         stats->hw_masks = NULL;
3991         kfree(stats->sw_stats);
3992         stats->sw_stats = NULL;
3993         if (stats->hw_stats) {
3994                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3995                                   stats->hw_stats_map);
3996                 stats->hw_stats = NULL;
3997         }
3998 }
3999
4000 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4001                                 bool alloc_masks)
4002 {
4003         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4004                                              &stats->hw_stats_map, GFP_KERNEL);
4005         if (!stats->hw_stats)
4006                 return -ENOMEM;
4007
4008         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4009         if (!stats->sw_stats)
4010                 goto stats_mem_err;
4011
4012         if (alloc_masks) {
4013                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4014                 if (!stats->hw_masks)
4015                         goto stats_mem_err;
4016         }
4017         return 0;
4018
4019 stats_mem_err:
4020         bnxt_free_stats_mem(bp, stats);
4021         return -ENOMEM;
4022 }
4023
4024 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4025 {
4026         int i;
4027
4028         for (i = 0; i < count; i++)
4029                 mask_arr[i] = mask;
4030 }
4031
4032 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4033 {
4034         int i;
4035
4036         for (i = 0; i < count; i++)
4037                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4038 }
4039
4040 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4041                                     struct bnxt_stats_mem *stats)
4042 {
4043         struct hwrm_func_qstats_ext_output *resp;
4044         struct hwrm_func_qstats_ext_input *req;
4045         __le64 *hw_masks;
4046         int rc;
4047
4048         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4049             !(bp->flags & BNXT_FLAG_CHIP_P5))
4050                 return -EOPNOTSUPP;
4051
4052         rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4053         if (rc)
4054                 return rc;
4055
4056         req->fid = cpu_to_le16(0xffff);
4057         req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4058
4059         resp = hwrm_req_hold(bp, req);
4060         rc = hwrm_req_send(bp, req);
4061         if (!rc) {
4062                 hw_masks = &resp->rx_ucast_pkts;
4063                 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4064         }
4065         hwrm_req_drop(bp, req);
4066         return rc;
4067 }
4068
4069 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4070 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4071
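     /* Probe the width of the hardware counters by requesting the counter
      * masks from firmware.  If the query is unsupported or fails, fall
      * back to assumed widths: 48-bit ring counters on P5 chips (64-bit
      * otherwise) and 40-bit port counters.
      */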
4072 static void bnxt_init_stats(struct bnxt *bp)
4073 {
4074         struct bnxt_napi *bnapi = bp->bnapi[0];
4075         struct bnxt_cp_ring_info *cpr;
4076         struct bnxt_stats_mem *stats;
4077         __le64 *rx_stats, *tx_stats;
4078         int rc, rx_count, tx_count;
4079         u64 *rx_masks, *tx_masks;
4080         u64 mask;
4081         u8 flags;
4082
4083         cpr = &bnapi->cp_ring;
4084         stats = &cpr->stats;
4085         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4086         if (rc) {
4087                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4088                         mask = (1ULL << 48) - 1;
4089                 else
4090                         mask = -1ULL;
4091                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4092         }
4093         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4094                 stats = &bp->port_stats;
4095                 rx_stats = stats->hw_stats;
4096                 rx_masks = stats->hw_masks;
4097                 rx_count = sizeof(struct rx_port_stats) / 8;
4098                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4099                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4100                 tx_count = sizeof(struct tx_port_stats) / 8;
4101
4102                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4103                 rc = bnxt_hwrm_port_qstats(bp, flags);
4104                 if (rc) {
4105                         mask = (1ULL << 40) - 1;
4106
4107                         bnxt_fill_masks(rx_masks, mask, rx_count);
4108                         bnxt_fill_masks(tx_masks, mask, tx_count);
4109                 } else {
4110                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4111                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4112                         bnxt_hwrm_port_qstats(bp, 0);
4113                 }
4114         }
4115         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4116                 stats = &bp->rx_port_stats_ext;
4117                 rx_stats = stats->hw_stats;
4118                 rx_masks = stats->hw_masks;
4119                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4120                 stats = &bp->tx_port_stats_ext;
4121                 tx_stats = stats->hw_stats;
4122                 tx_masks = stats->hw_masks;
4123                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4124
4125                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4126                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4127                 if (rc) {
4128                         mask = (1ULL << 40) - 1;
4129
4130                         bnxt_fill_masks(rx_masks, mask, rx_count);
4131                         if (tx_stats)
4132                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4133                 } else {
4134                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4135                         if (tx_stats)
4136                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4137                                                    tx_count);
4138                         bnxt_hwrm_port_qstats_ext(bp, 0);
4139                 }
4140         }
4141 }
4142
4143 static void bnxt_free_port_stats(struct bnxt *bp)
4144 {
4145         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4146         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4147
4148         bnxt_free_stats_mem(bp, &bp->port_stats);
4149         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4150         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4151 }
4152
4153 static void bnxt_free_ring_stats(struct bnxt *bp)
4154 {
4155         int i;
4156
4157         if (!bp->bnapi)
4158                 return;
4159
4160         for (i = 0; i < bp->cp_nr_rings; i++) {
4161                 struct bnxt_napi *bnapi = bp->bnapi[i];
4162                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4163
4164                 bnxt_free_stats_mem(bp, &cpr->stats);
4165         }
4166 }
4167
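/* Allocate DMA memory for the per-ring statistics and, except on VFs
 * and the 58700 chip, the port statistics.  The extended RX/TX port
 * statistics are allocated only when the firmware supports them, and
 * failures there are not fatal.
 */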
4168 static int bnxt_alloc_stats(struct bnxt *bp)
4169 {
4170         u32 size, i;
4171         int rc;
4172
4173         size = bp->hw_ring_stats_size;
4174
4175         for (i = 0; i < bp->cp_nr_rings; i++) {
4176                 struct bnxt_napi *bnapi = bp->bnapi[i];
4177                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4178
4179                 cpr->stats.len = size;
4180                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4181                 if (rc)
4182                         return rc;
4183
4184                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4185         }
4186
4187         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4188                 return 0;
4189
4190         if (bp->port_stats.hw_stats)
4191                 goto alloc_ext_stats;
4192
4193         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4194         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4195         if (rc)
4196                 return rc;
4197
4198         bp->flags |= BNXT_FLAG_PORT_STATS;
4199
4200 alloc_ext_stats:
4201         /* Display extended statistics only if FW supports them */
4202         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4203                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4204                         return 0;
4205
4206         if (bp->rx_port_stats_ext.hw_stats)
4207                 goto alloc_tx_ext_stats;
4208
4209         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4210         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4211         /* Extended stats are optional */
4212         if (rc)
4213                 return 0;
4214
4215 alloc_tx_ext_stats:
4216         if (bp->tx_port_stats_ext.hw_stats)
4217                 return 0;
4218
4219         if (bp->hwrm_spec_code >= 0x10902 ||
4220             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4221                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4222                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4223                 /* Extended stats are optional */
4224                 if (rc)
4225                         return 0;
4226         }
4227         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4228         return 0;
4229 }
4230
4231 static void bnxt_clear_ring_indices(struct bnxt *bp)
4232 {
4233         int i;
4234
4235         if (!bp->bnapi)
4236                 return;
4237
4238         for (i = 0; i < bp->cp_nr_rings; i++) {
4239                 struct bnxt_napi *bnapi = bp->bnapi[i];
4240                 struct bnxt_cp_ring_info *cpr;
4241                 struct bnxt_rx_ring_info *rxr;
4242                 struct bnxt_tx_ring_info *txr;
4243
4244                 if (!bnapi)
4245                         continue;
4246
4247                 cpr = &bnapi->cp_ring;
4248                 cpr->cp_raw_cons = 0;
4249
4250                 txr = bnapi->tx_ring;
4251                 if (txr) {
4252                         txr->tx_prod = 0;
4253                         txr->tx_cons = 0;
4254                 }
4255
4256                 rxr = bnapi->rx_ring;
4257                 if (rxr) {
4258                         rxr->rx_prod = 0;
4259                         rxr->rx_agg_prod = 0;
4260                         rxr->rx_sw_agg_prod = 0;
4261                         rxr->rx_next_cons = 0;
4262                 }
4263         }
4264 }
4265
4266 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4267 {
4268 #ifdef CONFIG_RFS_ACCEL
4269         int i;
4270
4271         /* We are under rtnl_lock and all our NAPIs have been disabled,
4272          * so it is safe to delete the hash table.
4273          */
4274         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4275                 struct hlist_head *head;
4276                 struct hlist_node *tmp;
4277                 struct bnxt_ntuple_filter *fltr;
4278
4279                 head = &bp->ntp_fltr_hash_tbl[i];
4280                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4281                         hlist_del(&fltr->hash);
4282                         kfree(fltr);
4283                 }
4284         }
4285         if (irq_reinit) {
4286                 kfree(bp->ntp_fltr_bmap);
4287                 bp->ntp_fltr_bmap = NULL;
4288         }
4289         bp->ntp_fltr_count = 0;
4290 #endif
4291 }
4292
4293 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4294 {
4295 #ifdef CONFIG_RFS_ACCEL
4296         int i, rc = 0;
4297
4298         if (!(bp->flags & BNXT_FLAG_RFS))
4299                 return 0;
4300
4301         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4302                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4303
4304         bp->ntp_fltr_count = 0;
4305         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4306                                     sizeof(long),
4307                                     GFP_KERNEL);
4308
4309         if (!bp->ntp_fltr_bmap)
4310                 rc = -ENOMEM;
4311
4312         return rc;
4313 #else
4314         return 0;
4315 #endif
4316 }
4317
4318 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4319 {
4320         bnxt_free_vnic_attributes(bp);
4321         bnxt_free_tx_rings(bp);
4322         bnxt_free_rx_rings(bp);
4323         bnxt_free_cp_rings(bp);
4324         bnxt_free_all_cp_arrays(bp);
4325         bnxt_free_ntp_fltrs(bp, irq_re_init);
4326         if (irq_re_init) {
4327                 bnxt_free_ring_stats(bp);
4328                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4329                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4330                         bnxt_free_port_stats(bp);
4331                 bnxt_free_ring_grps(bp);
4332                 bnxt_free_vnics(bp);
4333                 kfree(bp->tx_ring_map);
4334                 bp->tx_ring_map = NULL;
4335                 kfree(bp->tx_ring);
4336                 bp->tx_ring = NULL;
4337                 kfree(bp->rx_ring);
4338                 bp->rx_ring = NULL;
4339                 kfree(bp->bnapi);
4340                 bp->bnapi = NULL;
4341         } else {
4342                 bnxt_clear_ring_indices(bp);
4343         }
4344 }
4345
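/* Allocate all driver memory.  With irq_re_init, the bnxt_napi pointer
 * array and the bnxt_napi structs are carved out of one cache-aligned
 * allocation, followed by the RX/TX ring info arrays, statistics,
 * ntuple filter table and VNIC array.  The ring structures and VNIC
 * attributes are (re)allocated in all cases.
 */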
4346 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4347 {
4348         int i, j, rc, size, arr_size;
4349         void *bnapi;
4350
4351         if (irq_re_init) {
4352                 /* Allocate bnapi mem pointer array and mem block for
4353                  * all queues
4354                  */
4355                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4356                                 bp->cp_nr_rings);
4357                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4358                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4359                 if (!bnapi)
4360                         return -ENOMEM;
4361
4362                 bp->bnapi = bnapi;
4363                 bnapi += arr_size;
4364                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4365                         bp->bnapi[i] = bnapi;
4366                         bp->bnapi[i]->index = i;
4367                         bp->bnapi[i]->bp = bp;
4368                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4369                                 struct bnxt_cp_ring_info *cpr =
4370                                         &bp->bnapi[i]->cp_ring;
4371
4372                                 cpr->cp_ring_struct.ring_mem.flags =
4373                                         BNXT_RMEM_RING_PTE_FLAG;
4374                         }
4375                 }
4376
4377                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4378                                       sizeof(struct bnxt_rx_ring_info),
4379                                       GFP_KERNEL);
4380                 if (!bp->rx_ring)
4381                         return -ENOMEM;
4382
4383                 for (i = 0; i < bp->rx_nr_rings; i++) {
4384                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4385
4386                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4387                                 rxr->rx_ring_struct.ring_mem.flags =
4388                                         BNXT_RMEM_RING_PTE_FLAG;
4389                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4390                                         BNXT_RMEM_RING_PTE_FLAG;
4391                         }
4392                         rxr->bnapi = bp->bnapi[i];
4393                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4394                 }
4395
4396                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4397                                       sizeof(struct bnxt_tx_ring_info),
4398                                       GFP_KERNEL);
4399                 if (!bp->tx_ring)
4400                         return -ENOMEM;
4401
4402                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4403                                           GFP_KERNEL);
4404
4405                 if (!bp->tx_ring_map)
4406                         return -ENOMEM;
4407
4408                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4409                         j = 0;
4410                 else
4411                         j = bp->rx_nr_rings;
4412
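                /* TX rings below tx_nr_rings_xdp are XDP rings serviced
                 * by bnxt_tx_int_xdp; the rest back the netdev TX
                 * queues.  With shared rings, TX ring i shares the NAPI
                 * of RX ring i; otherwise the TX rings start after the
                 * RX rings.
                 */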
4413                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4414                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4415
4416                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4417                                 txr->tx_ring_struct.ring_mem.flags =
4418                                         BNXT_RMEM_RING_PTE_FLAG;
4419                         txr->bnapi = bp->bnapi[j];
4420                         bp->bnapi[j]->tx_ring = txr;
4421                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4422                         if (i >= bp->tx_nr_rings_xdp) {
4423                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4424                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4425                         } else {
4426                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4427                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4428                         }
4429                 }
4430
4431                 rc = bnxt_alloc_stats(bp);
4432                 if (rc)
4433                         goto alloc_mem_err;
4434                 bnxt_init_stats(bp);
4435
4436                 rc = bnxt_alloc_ntp_fltrs(bp);
4437                 if (rc)
4438                         goto alloc_mem_err;
4439
4440                 rc = bnxt_alloc_vnics(bp);
4441                 if (rc)
4442                         goto alloc_mem_err;
4443         }
4444
4445         rc = bnxt_alloc_all_cp_arrays(bp);
4446         if (rc)
4447                 goto alloc_mem_err;
4448
4449         bnxt_init_ring_struct(bp);
4450
4451         rc = bnxt_alloc_rx_rings(bp);
4452         if (rc)
4453                 goto alloc_mem_err;
4454
4455         rc = bnxt_alloc_tx_rings(bp);
4456         if (rc)
4457                 goto alloc_mem_err;
4458
4459         rc = bnxt_alloc_cp_rings(bp);
4460         if (rc)
4461                 goto alloc_mem_err;
4462
4463         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4464                                   BNXT_VNIC_UCAST_FLAG;
4465         rc = bnxt_alloc_vnic_attributes(bp);
4466         if (rc)
4467                 goto alloc_mem_err;
4468         return 0;
4469
4470 alloc_mem_err:
4471         bnxt_free_mem(bp, true);
4472         return rc;
4473 }
4474
4475 static void bnxt_disable_int(struct bnxt *bp)
4476 {
4477         int i;
4478
4479         if (!bp->bnapi)
4480                 return;
4481
4482         for (i = 0; i < bp->cp_nr_rings; i++) {
4483                 struct bnxt_napi *bnapi = bp->bnapi[i];
4484                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4485                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4486
4487                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4488                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4489         }
4490 }
4491
4492 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4493 {
4494         struct bnxt_napi *bnapi = bp->bnapi[n];
4495         struct bnxt_cp_ring_info *cpr;
4496
4497         cpr = &bnapi->cp_ring;
4498         return cpr->cp_ring_struct.map_idx;
4499 }
4500
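/* Disable interrupts and wait for any handler still running to finish:
 * bump intr_sem to mark interrupts disabled, write the NQ doorbells
 * without re-arming them, then synchronize_irq() on every vector.
 */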
4501 static void bnxt_disable_int_sync(struct bnxt *bp)
4502 {
4503         int i;
4504
4505         if (!bp->irq_tbl)
4506                 return;
4507
4508         atomic_inc(&bp->intr_sem);
4509
4510         bnxt_disable_int(bp);
4511         for (i = 0; i < bp->cp_nr_rings; i++) {
4512                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4513
4514                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4515         }
4516 }
4517
4518 static void bnxt_enable_int(struct bnxt *bp)
4519 {
4520         int i;
4521
4522         atomic_set(&bp->intr_sem, 0);
4523         for (i = 0; i < bp->cp_nr_rings; i++) {
4524                 struct bnxt_napi *bnapi = bp->bnapi[i];
4525                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4526
4527                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4528         }
4529 }
4530
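/* Register the driver with the firmware: advertise the driver version
 * and capabilities (hot reset, error recovery), set up forwarding of
 * the VF commands in bnxt_vf_req_snif to the PF, and subscribe to the
 * async events in bnxt_async_events_arr plus any caller-supplied bits
 * in @bmap.  With @async_only, only the async event bitmap is updated.
 */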
4531 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4532                             bool async_only)
4533 {
4534         DECLARE_BITMAP(async_events_bmap, 256);
4535         u32 *events = (u32 *)async_events_bmap;
4536         struct hwrm_func_drv_rgtr_output *resp;
4537         struct hwrm_func_drv_rgtr_input *req;
4538         u32 flags;
4539         int rc, i;
4540
4541         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4542         if (rc)
4543                 return rc;
4544
4545         req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4546                                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4547                                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4548
4549         req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4550         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4551         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4552                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4553         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4554                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4555                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4556         req->flags = cpu_to_le32(flags);
4557         req->ver_maj_8b = DRV_VER_MAJ;
4558         req->ver_min_8b = DRV_VER_MIN;
4559         req->ver_upd_8b = DRV_VER_UPD;
4560         req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4561         req->ver_min = cpu_to_le16(DRV_VER_MIN);
4562         req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4563
4564         if (BNXT_PF(bp)) {
4565                 u32 data[8];
4566                 int i;
4567
4568                 memset(data, 0, sizeof(data));
4569                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4570                         u16 cmd = bnxt_vf_req_snif[i];
4571                         unsigned int bit, idx;
4572
4573                         idx = cmd / 32;
4574                         bit = cmd % 32;
4575                         data[idx] |= 1 << bit;
4576                 }
4577
4578                 for (i = 0; i < 8; i++)
4579                         req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4580
4581                 req->enables |=
4582                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4583         }
4584
4585         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4586                 req->flags |= cpu_to_le32(
4587                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4588
4589         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4590         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4591                 u16 event_id = bnxt_async_events_arr[i];
4592
4593                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4594                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4595                         continue;
4596                 __set_bit(event_id, async_events_bmap);
4597         }
4598         if (bmap && bmap_size) {
4599                 for (i = 0; i < bmap_size; i++) {
4600                         if (test_bit(i, bmap))
4601                                 __set_bit(i, async_events_bmap);
4602                 }
4603         }
4604         for (i = 0; i < 8; i++)
4605                 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4606
4607         if (async_only)
4608                 req->enables =
4609                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4610
4611         resp = hwrm_req_hold(bp, req);
4612         rc = hwrm_req_send(bp, req);
4613         if (!rc) {
4614                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4615                 if (resp->flags &
4616                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4617                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4618         }
4619         hwrm_req_drop(bp, req);
4620         return rc;
4621 }
4622
4623 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4624 {
4625         struct hwrm_func_drv_unrgtr_input *req;
4626         int rc;
4627
4628         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4629                 return 0;
4630
4631         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4632         if (rc)
4633                 return rc;
4634         return hwrm_req_send(bp, req);
4635 }
4636
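/* The next two functions program the UDP tunnel (VXLAN / GENEVE)
 * destination port in the firmware; the firmware-assigned port ids are
 * cached in vxlan_fw_dst_port_id and nge_fw_dst_port_id.
 */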
4637 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4638 {
4639         struct hwrm_tunnel_dst_port_free_input *req;
4640         int rc;
4641
4642         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4643         if (rc)
4644                 return rc;
4645
4646         req->tunnel_type = tunnel_type;
4647
4648         switch (tunnel_type) {
4649         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4650                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4651                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4652                 break;
4653         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4654                 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4655                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4656                 break;
4657         default:
4658                 break;
4659         }
4660
4661         rc = hwrm_req_send(bp, req);
4662         if (rc)
4663                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4664                            rc);
4665         return rc;
4666 }
4667
4668 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4669                                            u8 tunnel_type)
4670 {
4671         struct hwrm_tunnel_dst_port_alloc_output *resp;
4672         struct hwrm_tunnel_dst_port_alloc_input *req;
4673         int rc;
4674
4675         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4676         if (rc)
4677                 return rc;
4678
4679         req->tunnel_type = tunnel_type;
4680         req->tunnel_dst_port_val = port;
4681
4682         resp = hwrm_req_hold(bp, req);
4683         rc = hwrm_req_send(bp, req);
4684         if (rc) {
4685                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4686                            rc);
4687                 goto err_out;
4688         }
4689
4690         switch (tunnel_type) {
4691         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4692                 bp->vxlan_fw_dst_port_id =
4693                         le16_to_cpu(resp->tunnel_dst_port_id);
4694                 break;
4695         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4696                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4697                 break;
4698         default:
4699                 break;
4700         }
4701
4702 err_out:
4703         hwrm_req_drop(bp, req);
4704         return rc;
4705 }
4706
4707 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4708 {
4709         struct hwrm_cfa_l2_set_rx_mask_input *req;
4710         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4711         int rc;
4712
4713         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4714         if (rc)
4715                 return rc;
4716
4717         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4718         req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4719         req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4720         req->mask = cpu_to_le32(vnic->rx_mask);
4721         return hwrm_req_send_silent(bp, req);
4722 }
4723
4724 #ifdef CONFIG_RFS_ACCEL
4725 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4726                                             struct bnxt_ntuple_filter *fltr)
4727 {
4728         struct hwrm_cfa_ntuple_filter_free_input *req;
4729         int rc;
4730
4731         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4732         if (rc)
4733                 return rc;
4734
4735         req->ntuple_filter_id = fltr->filter_id;
4736         return hwrm_req_send(bp, req);
4737 }
4738
4739 #define BNXT_NTP_FLTR_FLAGS                                     \
4740         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4741          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4742          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4743          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4744          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4745          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4746          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4747          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4748          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4749          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4750          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4751          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4752          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4753          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4754
4755 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4756                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4757
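/* Program one aRFS exact-match ntuple filter (full 4-tuple plus source
 * MAC, IPv4 or IPv6).  Depending on firmware capability, the
 * destination is given either as the RX ring index directly
 * (RFS_RING_TBL_IDX_V2) or via the per-ring VNIC at index rxq + 1.
 */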
4758 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4759                                              struct bnxt_ntuple_filter *fltr)
4760 {
4761         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4762         struct hwrm_cfa_ntuple_filter_alloc_input *req;
4763         struct flow_keys *keys = &fltr->fkeys;
4764         struct bnxt_vnic_info *vnic;
4765         u32 flags = 0;
4766         int rc;
4767
4768         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4769         if (rc)
4770                 return rc;
4771
4772         req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4773
4774         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4775                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4776                 req->dst_id = cpu_to_le16(fltr->rxq);
4777         } else {
4778                 vnic = &bp->vnic_info[fltr->rxq + 1];
4779                 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4780         }
4781         req->flags = cpu_to_le32(flags);
4782         req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4783
4784         req->ethertype = htons(ETH_P_IP);
4785         memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4786         req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4787         req->ip_protocol = keys->basic.ip_proto;
4788
4789         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4790                 int i;
4791
4792                 req->ethertype = htons(ETH_P_IPV6);
4793                 req->ip_addr_type =
4794                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4795                 *(struct in6_addr *)&req->src_ipaddr[0] =
4796                         keys->addrs.v6addrs.src;
4797                 *(struct in6_addr *)&req->dst_ipaddr[0] =
4798                         keys->addrs.v6addrs.dst;
4799                 for (i = 0; i < 4; i++) {
4800                         req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4801                         req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4802                 }
4803         } else {
4804                 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4805                 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4806                 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4807                 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4808         }
4809         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4810                 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4811                 req->tunnel_type =
4812                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4813         }
4814
4815         req->src_port = keys->ports.src;
4816         req->src_port_mask = cpu_to_be16(0xffff);
4817         req->dst_port = keys->ports.dst;
4818         req->dst_port_mask = cpu_to_be16(0xffff);
4819
4820         resp = hwrm_req_hold(bp, req);
4821         rc = hwrm_req_send(bp, req);
4822         if (!rc)
4823                 fltr->filter_id = resp->ntuple_filter_id;
4824         hwrm_req_drop(bp, req);
4825         return rc;
4826 }
4827 #endif
4828
4829 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4830                                      u8 *mac_addr)
4831 {
4832         struct hwrm_cfa_l2_filter_alloc_output *resp;
4833         struct hwrm_cfa_l2_filter_alloc_input *req;
4834         int rc;
4835
4836         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4837         if (rc)
4838                 return rc;
4839
4840         req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4841         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4842                 req->flags |=
4843                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4844         req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4845         req->enables =
4846                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4847                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4848                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4849         memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4850         req->l2_addr_mask[0] = 0xff;
4851         req->l2_addr_mask[1] = 0xff;
4852         req->l2_addr_mask[2] = 0xff;
4853         req->l2_addr_mask[3] = 0xff;
4854         req->l2_addr_mask[4] = 0xff;
4855         req->l2_addr_mask[5] = 0xff;
4856
4857         resp = hwrm_req_hold(bp, req);
4858         rc = hwrm_req_send(bp, req);
4859         if (!rc)
4860                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4861                                                         resp->l2_filter_id;
4862         hwrm_req_drop(bp, req);
4863         return rc;
4864 }
4865
4866 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4867 {
4868         struct hwrm_cfa_l2_filter_free_input *req;
4869         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4870         int rc;
4871
4872         /* Any associated ntuple filters will also be cleared by firmware. */
4873         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4874         if (rc)
4875                 return rc;
4876         hwrm_req_hold(bp, req);
4877         for (i = 0; i < num_of_vnics; i++) {
4878                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4879
4880                 for (j = 0; j < vnic->uc_filter_count; j++) {
4881                         req->l2_filter_id = vnic->fw_l2_filter_id[j];
4882
4883                         rc = hwrm_req_send(bp, req);
4884                 }
4885                 vnic->uc_filter_count = 0;
4886         }
4887         hwrm_req_drop(bp, req);
4888         return rc;
4889 }
4890
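/* Configure TPA (hardware GRO/LRO aggregation) on a VNIC.  A zero
 * tpa_flags disables TPA; otherwise the maximum number of aggregation
 * segments is derived from the MSS and MAX_SKB_FRAGS so that an
 * aggregated packet always fits in one SKB.
 */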
4891 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4892 {
4893         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4894         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4895         struct hwrm_vnic_tpa_cfg_input *req;
4896         int rc;
4897
4898         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4899                 return 0;
4900
4901         rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4902         if (rc)
4903                 return rc;
4904
4905         if (tpa_flags) {
4906                 u16 mss = bp->dev->mtu - 40;
4907                 u32 nsegs, n, segs = 0, flags;
4908
4909                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4910                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4911                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4912                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4913                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4914                 if (tpa_flags & BNXT_FLAG_GRO)
4915                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4916
4917                 req->flags = cpu_to_le32(flags);
4918
4919                 req->enables =
4920                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4921                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4922                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4923
4924                 /* The number of segs is in log2 units, and the first
4925                  * packet is not included in these units.
4926                  */
4927                 if (mss <= BNXT_RX_PAGE_SIZE) {
4928                         n = BNXT_RX_PAGE_SIZE / mss;
4929                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4930                 } else {
4931                         n = mss / BNXT_RX_PAGE_SIZE;
4932                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4933                                 n++;
4934                         nsegs = (MAX_SKB_FRAGS - n) / n;
4935                 }
4936
4937                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4938                         segs = MAX_TPA_SEGS_P5;
4939                         max_aggs = bp->max_tpa;
4940                 } else {
4941                         segs = ilog2(nsegs);
4942                 }
4943                 req->max_agg_segs = cpu_to_le16(segs);
4944                 req->max_aggs = cpu_to_le16(max_aggs);
4945
4946                 req->min_agg_len = cpu_to_le32(512);
4947         }
4948         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4949
4950         return hwrm_req_send(bp, req);
4951 }
4952
4953 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4954 {
4955         struct bnxt_ring_grp_info *grp_info;
4956
4957         grp_info = &bp->grp_info[ring->grp_idx];
4958         return grp_info->cp_fw_ring_id;
4959 }
4960
4961 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4962 {
4963         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4964                 struct bnxt_napi *bnapi = rxr->bnapi;
4965                 struct bnxt_cp_ring_info *cpr;
4966
4967                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4968                 return cpr->cp_ring_struct.fw_ring_id;
4969         } else {
4970                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4971         }
4972 }
4973
4974 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4975 {
4976         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4977                 struct bnxt_napi *bnapi = txr->bnapi;
4978                 struct bnxt_cp_ring_info *cpr;
4979
4980                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4981                 return cpr->cp_ring_struct.fw_ring_id;
4982         } else {
4983                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4984         }
4985 }
4986
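/* The RSS indirection table is fixed at HW_HASH_INDEX_SIZE entries on
 * pre-P5 chips and BNXT_MAX_RSS_TABLE_ENTRIES_P5 entries on P5 chips.
 */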
4987 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
4988 {
4989         int entries;
4990
4991         if (bp->flags & BNXT_FLAG_CHIP_P5)
4992                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
4993         else
4994                 entries = HW_HASH_INDEX_SIZE;
4995
4996         bp->rss_indir_tbl_entries = entries;
4997         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
4998                                           GFP_KERNEL);
4999         if (!bp->rss_indir_tbl)
5000                 return -ENOMEM;
5001         return 0;
5002 }
5003
5004 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5005 {
5006         u16 max_rings, max_entries, pad, i;
5007
5008         if (!bp->rx_nr_rings)
5009                 return;
5010
5011         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5012                 max_rings = bp->rx_nr_rings - 1;
5013         else
5014                 max_rings = bp->rx_nr_rings;
5015
5016         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5017
5018         for (i = 0; i < max_entries; i++)
5019                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5020
5021         pad = bp->rss_indir_tbl_entries - max_entries;
5022         if (pad)
5023                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5024 }
5025
5026 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5027 {
5028         u16 i, tbl_size, max_ring = 0;
5029
5030         if (!bp->rss_indir_tbl)
5031                 return 0;
5032
5033         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5034         for (i = 0; i < tbl_size; i++)
5035                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5036         return max_ring;
5037 }
5038
5039 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5040 {
5041         if (bp->flags & BNXT_FLAG_CHIP_P5)
5042                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5043         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5044                 return 2;
5045         return 1;
5046 }
5047
5048 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5049 {
5050         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5051         u16 i, j;
5052
5053         /* Fill the RSS indirection table with ring group ids */
5054         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5055                 if (!no_rss)
5056                         j = bp->rss_indir_tbl[i];
5057                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5058         }
5059 }
5060
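/* On P5 chips each RSS indirection table slot is a (RX ring id,
 * companion completion ring id) pair, so two entries are written per
 * slot.
 */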
5061 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5062                                       struct bnxt_vnic_info *vnic)
5063 {
5064         __le16 *ring_tbl = vnic->rss_table;
5065         struct bnxt_rx_ring_info *rxr;
5066         u16 tbl_size, i;
5067
5068         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5069
5070         for (i = 0; i < tbl_size; i++) {
5071                 u16 ring_id, j;
5072
5073                 j = bp->rss_indir_tbl[i];
5074                 rxr = &bp->rx_ring[j];
5075
5076                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5077                 *ring_tbl++ = cpu_to_le16(ring_id);
5078                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5079                 *ring_tbl++ = cpu_to_le16(ring_id);
5080         }
5081 }
5082
5083 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5084 {
5085         if (bp->flags & BNXT_FLAG_CHIP_P5)
5086                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5087         else
5088                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5089 }
5090
5091 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5092 {
5093         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5094         struct hwrm_vnic_rss_cfg_input *req;
5095         int rc;
5096
5097         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5098             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5099                 return 0;
5100
5101         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5102         if (rc)
5103                 return rc;
5104
5105         if (set_rss) {
5106                 bnxt_fill_hw_rss_tbl(bp, vnic);
5107                 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5108                 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5109                 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5110                 req->hash_key_tbl_addr =
5111                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5112         }
5113         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5114         return hwrm_req_send(bp, req);
5115 }
5116
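/* P5 chips spread the RSS table across multiple contexts, each
 * covering BNXT_RSS_TABLE_ENTRIES_P5 entries; program each context
 * with its slice of the ring table.
 */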
5117 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5118 {
5119         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5120         struct hwrm_vnic_rss_cfg_input *req;
5121         dma_addr_t ring_tbl_map;
5122         u32 i, nr_ctxs;
5123         int rc;
5124
5125         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5126         if (rc)
5127                 return rc;
5128
5129         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5130         if (!set_rss)
5131                 return hwrm_req_send(bp, req);
5132
5133         bnxt_fill_hw_rss_tbl(bp, vnic);
5134         req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5135         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5136         req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5137         ring_tbl_map = vnic->rss_table_dma_addr;
5138         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5139
5140         hwrm_req_hold(bp, req);
5141         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5142                 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5143                 req->ring_table_pair_index = i;
5144                 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5145                 rc = hwrm_req_send(bp, req);
5146                 if (rc)
5147                         goto exit;
5148         }
5149
5150 exit:
5151         hwrm_req_drop(bp, req);
5152         return rc;
5153 }
5154
5155 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5156 {
5157         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5158         struct hwrm_vnic_plcmodes_cfg_input *req;
5159         int rc;
5160
5161         rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5162         if (rc)
5163                 return rc;
5164
5165         req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5166                                  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5167                                  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5168         req->enables =
5169                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5170                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5171         /* thresholds not implemented in firmware yet */
5172         req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5173         req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5174         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5175         return hwrm_req_send(bp, req);
5176 }
5177
5178 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5179                                         u16 ctx_idx)
5180 {
5181         struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5182
5183         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5184                 return;
5185
5186         req->rss_cos_lb_ctx_id =
5187                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5188
5189         hwrm_req_send(bp, req);
5190         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5191 }
5192
5193 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5194 {
5195         int i, j;
5196
5197         for (i = 0; i < bp->nr_vnics; i++) {
5198                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5199
5200                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5201                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5202                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5203                 }
5204         }
5205         bp->rsscos_nr_ctxs = 0;
5206 }
5207
5208 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5209 {
5210         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5211         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5212         int rc;
5213
5214         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5215         if (rc)
5216                 return rc;
5217
5218         resp = hwrm_req_hold(bp, req);
5219         rc = hwrm_req_send(bp, req);
5220         if (!rc)
5221                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5222                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5223         hwrm_req_drop(bp, req);
5224
5225         return rc;
5226 }
5227
5228 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5229 {
5230         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5231                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5232         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5233 }
5234
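/* Configure a VNIC: its default ring (group), RSS/COS/LB context ids,
 * MRU and VLAN stripping.  P5 chips have no ring groups, so the
 * default RX and completion ring ids are given directly instead.
 */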
5235 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5236 {
5237         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5238         struct hwrm_vnic_cfg_input *req;
5239         unsigned int ring = 0, grp_idx;
5240         u16 def_vlan = 0;
5241         int rc;
5242
5243         rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5244         if (rc)
5245                 return rc;
5246
5247         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5248                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5249
5250                 req->default_rx_ring_id =
5251                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5252                 req->default_cmpl_ring_id =
5253                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5254                 req->enables =
5255                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5256                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5257                 goto vnic_mru;
5258         }
5259         req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5260         /* Only RSS is supported for now; COS & LB are TBD */
5261         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5262                 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5263                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5264                                            VNIC_CFG_REQ_ENABLES_MRU);
5265         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5266                 req->rss_rule =
5267                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5268                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5269                                            VNIC_CFG_REQ_ENABLES_MRU);
5270                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5271         } else {
5272                 req->rss_rule = cpu_to_le16(0xffff);
5273         }
5274
5275         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5276             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5277                 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5278                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5279         } else {
5280                 req->cos_rule = cpu_to_le16(0xffff);
5281         }
5282
5283         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5284                 ring = 0;
5285         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5286                 ring = vnic_id - 1;
5287         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5288                 ring = bp->rx_nr_rings - 1;
5289
5290         grp_idx = bp->rx_ring[ring].bnapi->index;
5291         req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5292         req->lb_rule = cpu_to_le16(0xffff);
5293 vnic_mru:
5294         req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5295
5296         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5297 #ifdef CONFIG_BNXT_SRIOV
5298         if (BNXT_VF(bp))
5299                 def_vlan = bp->vf.vlan;
5300 #endif
5301         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5302                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5303         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5304                 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5305
5306         return hwrm_req_send(bp, req);
5307 }
5308
5309 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5310 {
5311         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5312                 struct hwrm_vnic_free_input *req;
5313
5314                 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5315                         return;
5316
5317                 req->vnic_id =
5318                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5319
5320                 hwrm_req_send(bp, req);
5321                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5322         }
5323 }
5324
5325 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5326 {
5327         u16 i;
5328
5329         for (i = 0; i < bp->nr_vnics; i++)
5330                 bnxt_hwrm_vnic_free_one(bp, i);
5331 }
5332
5333 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5334                                 unsigned int start_rx_ring_idx,
5335                                 unsigned int nr_rings)
5336 {
5337         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5338         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5339         struct hwrm_vnic_alloc_output *resp;
5340         struct hwrm_vnic_alloc_input *req;
5341         int rc;
5342
5343         rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5344         if (rc)
5345                 return rc;
5346
5347         if (bp->flags & BNXT_FLAG_CHIP_P5)
5348                 goto vnic_no_ring_grps;
5349
5350         /* map ring groups to this vnic */
5351         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5352                 grp_idx = bp->rx_ring[i].bnapi->index;
5353                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5354                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5355                                    j, nr_rings);
5356                         break;
5357                 }
5358                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5359         }
5360
5361 vnic_no_ring_grps:
5362         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5363                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5364         if (vnic_id == 0)
5365                 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5366
5367         resp = hwrm_req_hold(bp, req);
5368         rc = hwrm_req_send(bp, req);
5369         if (!rc)
5370                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5371         hwrm_req_drop(bp, req);
5372         return rc;
5373 }
5374
5375 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5376 {
5377         struct hwrm_vnic_qcaps_output *resp;
5378         struct hwrm_vnic_qcaps_input *req;
5379         int rc;
5380
5381         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5382         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5383         if (bp->hwrm_spec_code < 0x10600)
5384                 return 0;
5385
5386         rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5387         if (rc)
5388                 return rc;
5389
5390         resp = hwrm_req_hold(bp, req);
5391         rc = hwrm_req_send(bp, req);
5392         if (!rc) {
5393                 u32 flags = le32_to_cpu(resp->flags);
5394
5395                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5396                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5397                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5398                 if (flags &
5399                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5400                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5401
5402                 /* Older P5 fw before EXT_HW_STATS support did not set
5403                  * VLAN_STRIP_CAP properly.
5404                  */
5405                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5406                     (BNXT_CHIP_P5_THOR(bp) &&
5407                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5408                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5409                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5410                 if (bp->max_tpa_v2) {
5411                         if (BNXT_CHIP_P5_THOR(bp))
5412                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5413                         else
5414                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5415                 }
5416         }
5417         hwrm_req_drop(bp, req);
5418         return rc;
5419 }
5420
5421 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5422 {
5423         struct hwrm_ring_grp_alloc_output *resp;
5424         struct hwrm_ring_grp_alloc_input *req;
5425         int rc;
5426         u16 i;
5427
5428         if (bp->flags & BNXT_FLAG_CHIP_P5)
5429                 return 0;
5430
5431         rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5432         if (rc)
5433                 return rc;
5434
5435         resp = hwrm_req_hold(bp, req);
5436         for (i = 0; i < bp->rx_nr_rings; i++) {
5437                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5438
5439                 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5440                 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5441                 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5442                 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5443
5444                 rc = hwrm_req_send(bp, req);
5445
5446                 if (rc)
5447                         break;
5448
5449                 bp->grp_info[grp_idx].fw_grp_id =
5450                         le32_to_cpu(resp->ring_group_id);
5451         }
5452         hwrm_req_drop(bp, req);
5453         return rc;
5454 }
5455
5456 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5457 {
5458         struct hwrm_ring_grp_free_input *req;
5459         u16 i;
5460
5461         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5462                 return;
5463
5464         if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5465                 return;
5466
5467         hwrm_req_hold(bp, req);
5468         for (i = 0; i < bp->cp_nr_rings; i++) {
5469                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5470                         continue;
5471                 req->ring_group_id =
5472                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5473
5474                 hwrm_req_send(bp, req);
5475                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5476         }
5477         hwrm_req_drop(bp, req);
5478 }
5479
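/* Allocate one hardware ring of the given type (TX, RX, RX aggregation,
 * completion or NQ) and record the firmware ring id.  The request also
 * ties the ring to its partners: TX/RX rings to a completion ring and
 * stats context, aggregation rings to their RX ring, and completion
 * rings to their NQ on P5 chips.
 */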
5480 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5481                                     struct bnxt_ring_struct *ring,
5482                                     u32 ring_type, u32 map_index)
5483 {
5484         struct hwrm_ring_alloc_output *resp;
5485         struct hwrm_ring_alloc_input *req;
5486         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5487         struct bnxt_ring_grp_info *grp_info;
5488         int rc, err = 0;
5489         u16 ring_id;
5490
5491         rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5492         if (rc)
5493                 goto exit;
5494
5495         req->enables = 0;
5496         if (rmem->nr_pages > 1) {
5497                 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5498                 /* Page size is in log2 units */
5499                 req->page_size = BNXT_PAGE_SHIFT;
5500                 req->page_tbl_depth = 1;
5501         } else {
5502                 req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5503         }
5504         req->fbo = 0;
5505         /* Association of ring index with doorbell index and MSIX number */
5506         req->logical_id = cpu_to_le16(map_index);
5507
5508         switch (ring_type) {
5509         case HWRM_RING_ALLOC_TX: {
5510                 struct bnxt_tx_ring_info *txr;
5511
5512                 txr = container_of(ring, struct bnxt_tx_ring_info,
5513                                    tx_ring_struct);
5514                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5515                 /* Association of transmit ring with completion ring */
5516                 grp_info = &bp->grp_info[ring->grp_idx];
5517                 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5518                 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5519                 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5520                 req->queue_id = cpu_to_le16(ring->queue_id);
5521                 break;
5522         }
5523         case HWRM_RING_ALLOC_RX:
5524                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5525                 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5526                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5527                         u16 flags = 0;
5528
5529                         /* Association of rx ring with stats context */
5530                         grp_info = &bp->grp_info[ring->grp_idx];
5531                         req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5532                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5533                         req->enables |= cpu_to_le32(
5534                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5535                         if (NET_IP_ALIGN == 2)
5536                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5537                         req->flags = cpu_to_le16(flags);
5538                 }
5539                 break;
5540         case HWRM_RING_ALLOC_AGG:
5541                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5542                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5543                         /* Association of agg ring with rx ring */
5544                         grp_info = &bp->grp_info[ring->grp_idx];
5545                         req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5546                         req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5547                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5548                         req->enables |= cpu_to_le32(
5549                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5550                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5551                 } else {
5552                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5553                 }
5554                 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5555                 break;
5556         case HWRM_RING_ALLOC_CMPL:
5557                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5558                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5559                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5560                         /* Association of cp ring with nq */
5561                         grp_info = &bp->grp_info[map_index];
5562                         req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5563                         req->cq_handle = cpu_to_le64(ring->handle);
5564                         req->enables |= cpu_to_le32(
5565                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5566                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5567                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5568                 }
5569                 break;
5570         case HWRM_RING_ALLOC_NQ:
5571                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5572                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5573                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5574                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5575                 break;
5576         default:
5577                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5578                            ring_type);
5579                 return -EINVAL;
5580         }
5581
5582         resp = hwrm_req_hold(bp, req);
5583         rc = hwrm_req_send(bp, req);
5584         err = le16_to_cpu(resp->error_code);
5585         ring_id = le16_to_cpu(resp->ring_id);
5586         hwrm_req_drop(bp, req);
5587
5588 exit:
5589         if (rc || err) {
5590                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5591                            ring_type, rc, err);
5592                 return -EIO;
5593         }
5594         ring->fw_ring_id = ring_id;
5595         return rc;
5596 }
5597
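/* Tell firmware which completion ring to use for async event
 * notifications, using HWRM_FUNC_CFG on the PF and HWRM_FUNC_VF_CFG
 * on a VF.
 */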
5598 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5599 {
5600         int rc;
5601
5602         if (BNXT_PF(bp)) {
5603                 struct hwrm_func_cfg_input *req;
5604
5605                 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5606                 if (rc)
5607                         return rc;
5608
5609                 req->fid = cpu_to_le16(0xffff);
5610                 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5611                 req->async_event_cr = cpu_to_le16(idx);
5612                 return hwrm_req_send(bp, req);
5613         } else {
5614                 struct hwrm_func_vf_cfg_input *req;
5615
5616                 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5617                 if (rc)
5618                         return rc;
5619
5620                 req->enables =
5621                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5622                 req->async_event_cr = cpu_to_le16(idx);
5623                 return hwrm_req_send(bp, req);
5624         }
5625 }
5626
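/* Initialize the doorbell address and key for a newly allocated ring.
 * P5 chips use a 64-bit doorbell at a fixed PF/VF offset, keyed by
 * ring type and XID; older chips use a 32-bit doorbell at a per-ring
 * offset (map_idx * 0x80).
 */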
5627 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5628                         u32 map_idx, u32 xid)
5629 {
5630         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5631                 if (BNXT_PF(bp))
5632                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5633                 else
5634                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5635                 switch (ring_type) {
5636                 case HWRM_RING_ALLOC_TX:
5637                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5638                         break;
5639                 case HWRM_RING_ALLOC_RX:
5640                 case HWRM_RING_ALLOC_AGG:
5641                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5642                         break;
5643                 case HWRM_RING_ALLOC_CMPL:
5644                         db->db_key64 = DBR_PATH_L2;
5645                         break;
5646                 case HWRM_RING_ALLOC_NQ:
5647                         db->db_key64 = DBR_PATH_L2;
5648                         break;
5649                 }
5650                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5651         } else {
5652                 db->doorbell = bp->bar1 + map_idx * 0x80;
5653                 switch (ring_type) {
5654                 case HWRM_RING_ALLOC_TX:
5655                         db->db_key32 = DB_KEY_TX;
5656                         break;
5657                 case HWRM_RING_ALLOC_RX:
5658                 case HWRM_RING_ALLOC_AGG:
5659                         db->db_key32 = DB_KEY_RX;
5660                         break;
5661                 case HWRM_RING_ALLOC_CMPL:
5662                         db->db_key32 = DB_KEY_CP;
5663                         break;
5664                 }
5665         }
5666 }
5667
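/* Allocate all firmware rings: NQs (completion rings on pre-P5 chips)
 * first, then TX, RX and aggregation rings, setting up the doorbell
 * for each.  The first completion ring is also registered to receive
 * async events.
 */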
5668 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5669 {
5670         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5671         int i, rc = 0;
5672         u32 type;
5673
5674         if (bp->flags & BNXT_FLAG_CHIP_P5)
5675                 type = HWRM_RING_ALLOC_NQ;
5676         else
5677                 type = HWRM_RING_ALLOC_CMPL;
5678         for (i = 0; i < bp->cp_nr_rings; i++) {
5679                 struct bnxt_napi *bnapi = bp->bnapi[i];
5680                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5681                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5682                 u32 map_idx = ring->map_idx;
5683                 unsigned int vector;
5684
5685                 vector = bp->irq_tbl[map_idx].vector;
5686                 disable_irq_nosync(vector);
5687                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5688                 if (rc) {
5689                         enable_irq(vector);
5690                         goto err_out;
5691                 }
5692                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5693                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5694                 enable_irq(vector);
5695                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5696
5697                 if (!i) {
5698                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5699                         if (rc)
5700                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5701                 }
5702         }
5703
5704         type = HWRM_RING_ALLOC_TX;
5705         for (i = 0; i < bp->tx_nr_rings; i++) {
5706                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5707                 struct bnxt_ring_struct *ring;
5708                 u32 map_idx;
5709
5710                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5711                         struct bnxt_napi *bnapi = txr->bnapi;
5712                         struct bnxt_cp_ring_info *cpr, *cpr2;
5713                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5714
5715                         cpr = &bnapi->cp_ring;
5716                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5717                         ring = &cpr2->cp_ring_struct;
5718                         ring->handle = BNXT_TX_HDL;
5719                         map_idx = bnapi->index;
5720                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5721                         if (rc)
5722                                 goto err_out;
5723                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5724                                     ring->fw_ring_id);
5725                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5726                 }
5727                 ring = &txr->tx_ring_struct;
5728                 map_idx = i;
5729                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5730                 if (rc)
5731                         goto err_out;
5732                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5733         }
5734
5735         type = HWRM_RING_ALLOC_RX;
5736         for (i = 0; i < bp->rx_nr_rings; i++) {
5737                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5738                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5739                 struct bnxt_napi *bnapi = rxr->bnapi;
5740                 u32 map_idx = bnapi->index;
5741
5742                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5743                 if (rc)
5744                         goto err_out;
5745                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5746                 /* If we have agg rings, post agg buffers first.  The RX
                  * doorbell is then written together with the agg doorbell
                  * in the aggregation loop below.
                  */
5747                 if (!agg_rings)
5748                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5749                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5750                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5751                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5752                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5753                         struct bnxt_cp_ring_info *cpr2;
5754
5755                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5756                         ring = &cpr2->cp_ring_struct;
5757                         ring->handle = BNXT_RX_HDL;
5758                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5759                         if (rc)
5760                                 goto err_out;
5761                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5762                                     ring->fw_ring_id);
5763                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5764                 }
5765         }
5766
5767         if (agg_rings) {
5768                 type = HWRM_RING_ALLOC_AGG;
5769                 for (i = 0; i < bp->rx_nr_rings; i++) {
5770                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5771                         struct bnxt_ring_struct *ring =
5772                                                 &rxr->rx_agg_ring_struct;
5773                         u32 grp_idx = ring->grp_idx;
5774                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5775
5776                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5777                         if (rc)
5778                                 goto err_out;
5779
5780                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5781                                     ring->fw_ring_id);
5782                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5783                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5784                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5785                 }
5786         }
5787 err_out:
5788         return rc;
5789 }
5790
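/* Send HWRM_RING_FREE for one ring.  Callers closing the data path
 * pass the associated completion ring ID so firmware signals the
 * command on that ring; otherwise INVALID_HW_RING_ID is used.
 */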
5791 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5792                                    struct bnxt_ring_struct *ring,
5793                                    u32 ring_type, int cmpl_ring_id)
5794 {
5795         struct hwrm_ring_free_output *resp;
5796         struct hwrm_ring_free_input *req;
5797         u16 error_code = 0;
5798         int rc;
5799
5800         if (BNXT_NO_FW_ACCESS(bp))
5801                 return 0;
5802
5803         rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5804         if (rc)
5805                 goto exit;
5806
5807         req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5808         req->ring_type = ring_type;
5809         req->ring_id = cpu_to_le16(ring->fw_ring_id);
5810
5811         resp = hwrm_req_hold(bp, req);
5812         rc = hwrm_req_send(bp, req);
5813         error_code = le16_to_cpu(resp->error_code);
5814         hwrm_req_drop(bp, req);
5815 exit:
5816         if (rc || error_code) {
5817                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5818                            ring_type, rc, error_code);
5819                 return -EIO;
5820         }
5821         return 0;
5822 }
5823
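/* Free all firmware rings: TX, RX and aggregation rings first, then,
 * with interrupts disabled, the completion and NQ rings.
 */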
5824 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5825 {
5826         u32 type;
5827         int i;
5828
5829         if (!bp->bnapi)
5830                 return;
5831
5832         for (i = 0; i < bp->tx_nr_rings; i++) {
5833                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5834                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5835
5836                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5837                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5838
5839                         hwrm_ring_free_send_msg(bp, ring,
5840                                                 RING_FREE_REQ_RING_TYPE_TX,
5841                                                 close_path ? cmpl_ring_id :
5842                                                 INVALID_HW_RING_ID);
5843                         ring->fw_ring_id = INVALID_HW_RING_ID;
5844                 }
5845         }
5846
5847         for (i = 0; i < bp->rx_nr_rings; i++) {
5848                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5849                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5850                 u32 grp_idx = rxr->bnapi->index;
5851
5852                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5853                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5854
5855                         hwrm_ring_free_send_msg(bp, ring,
5856                                                 RING_FREE_REQ_RING_TYPE_RX,
5857                                                 close_path ? cmpl_ring_id :
5858                                                 INVALID_HW_RING_ID);
5859                         ring->fw_ring_id = INVALID_HW_RING_ID;
5860                         bp->grp_info[grp_idx].rx_fw_ring_id =
5861                                 INVALID_HW_RING_ID;
5862                 }
5863         }
5864
5865         if (bp->flags & BNXT_FLAG_CHIP_P5)
5866                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5867         else
5868                 type = RING_FREE_REQ_RING_TYPE_RX;
5869         for (i = 0; i < bp->rx_nr_rings; i++) {
5870                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5871                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5872                 u32 grp_idx = rxr->bnapi->index;
5873
5874                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5875                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5876
5877                         hwrm_ring_free_send_msg(bp, ring, type,
5878                                                 close_path ? cmpl_ring_id :
5879                                                 INVALID_HW_RING_ID);
5880                         ring->fw_ring_id = INVALID_HW_RING_ID;
5881                         bp->grp_info[grp_idx].agg_fw_ring_id =
5882                                 INVALID_HW_RING_ID;
5883                 }
5884         }
5885
5886         /* The completion rings are about to be freed.  After that
5887          * the IRQ doorbell will no longer work, so interrupts must
5888          * be disabled first.
5889          */
5890         bnxt_disable_int_sync(bp);
5891
5892         if (bp->flags & BNXT_FLAG_CHIP_P5)
5893                 type = RING_FREE_REQ_RING_TYPE_NQ;
5894         else
5895                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5896         for (i = 0; i < bp->cp_nr_rings; i++) {
5897                 struct bnxt_napi *bnapi = bp->bnapi[i];
5898                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5899                 struct bnxt_ring_struct *ring;
5900                 int j;
5901
5902                 for (j = 0; j < 2; j++) {
5903                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5904
5905                         if (cpr2) {
5906                                 ring = &cpr2->cp_ring_struct;
5907                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5908                                         continue;
5909                                 hwrm_ring_free_send_msg(bp, ring,
5910                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5911                                         INVALID_HW_RING_ID);
5912                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5913                         }
5914                 }
5915                 ring = &cpr->cp_ring_struct;
5916                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5917                         hwrm_ring_free_send_msg(bp, ring, type,
5918                                                 INVALID_HW_RING_ID);
5919                         ring->fw_ring_id = INVALID_HW_RING_ID;
5920                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5921                 }
5922         }
5923 }
5924
5925 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5926                            bool shared);
5927
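/* Query HWRM_FUNC_QCFG and record the resources currently reserved by
 * firmware in bp->hw_resc.  On P5 chips, the RX and TX ring counts are
 * trimmed to fit within the reserved completion rings.
 */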
5928 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5929 {
5930         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5931         struct hwrm_func_qcfg_output *resp;
5932         struct hwrm_func_qcfg_input *req;
5933         int rc;
5934
5935         if (bp->hwrm_spec_code < 0x10601)
5936                 return 0;
5937
5938         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5939         if (rc)
5940                 return rc;
5941
5942         req->fid = cpu_to_le16(0xffff);
5943         resp = hwrm_req_hold(bp, req);
5944         rc = hwrm_req_send(bp, req);
5945         if (rc) {
5946                 hwrm_req_drop(bp, req);
5947                 return rc;
5948         }
5949
5950         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5951         if (BNXT_NEW_RM(bp)) {
5952                 u16 cp, stats;
5953
5954                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5955                 hw_resc->resv_hw_ring_grps =
5956                         le32_to_cpu(resp->alloc_hw_ring_grps);
5957                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5958                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5959                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5960                 hw_resc->resv_irqs = cp;
5961                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5962                         int rx = hw_resc->resv_rx_rings;
5963                         int tx = hw_resc->resv_tx_rings;
5964
5965                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5966                                 rx >>= 1;
5967                         if (cp < (rx + tx)) {
5968                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5969                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5970                                         rx <<= 1;
5971                                 hw_resc->resv_rx_rings = rx;
5972                                 hw_resc->resv_tx_rings = tx;
5973                         }
5974                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5975                         hw_resc->resv_hw_ring_grps = rx;
5976                 }
5977                 hw_resc->resv_cp_rings = cp;
5978                 hw_resc->resv_stat_ctxs = stats;
5979         }
5980         hwrm_req_drop(bp, req);
5981         return 0;
5982 }
5983
5984 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5985 {
5986         struct hwrm_func_qcfg_output *resp;
5987         struct hwrm_func_qcfg_input *req;
5988         int rc;
5989
5990         if (bp->hwrm_spec_code < 0x10601)
5991                 return 0;
5992
5993         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5994         if (rc)
5995                 return rc;
5996
5997         req->fid = cpu_to_le16(fid);
5998         resp = hwrm_req_hold(bp, req);
5999         rc = hwrm_req_send(bp, req);
6000         if (!rc)
6001                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6002
6003         hwrm_req_drop(bp, req);
6004         return rc;
6005 }
6006
6007 static bool bnxt_rfs_supported(struct bnxt *bp);
6008
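/* Build (but do not send) a HWRM_FUNC_CFG request reserving the given
 * rings and resources for the PF.  Returns NULL if the request cannot
 * be initialized.
 */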
6009 static struct hwrm_func_cfg_input *
6010 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6011                              int ring_grps, int cp_rings, int stats, int vnics)
6012 {
6013         struct hwrm_func_cfg_input *req;
6014         u32 enables = 0;
6015
6016         if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6017                 return NULL;
6018
6019         req->fid = cpu_to_le16(0xffff);
6020         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6021         req->num_tx_rings = cpu_to_le16(tx_rings);
6022         if (BNXT_NEW_RM(bp)) {
6023                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6024                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6025                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6026                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6027                         enables |= tx_rings + ring_grps ?
6028                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6029                         enables |= rx_rings ?
6030                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6031                 } else {
6032                         enables |= cp_rings ?
6033                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6034                         enables |= ring_grps ?
6035                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6036                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6037                 }
6038                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6039
6040                 req->num_rx_rings = cpu_to_le16(rx_rings);
6041                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6042                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6043                         req->num_msix = cpu_to_le16(cp_rings);
6044                         req->num_rsscos_ctxs =
6045                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6046                 } else {
6047                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6048                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6049                         req->num_rsscos_ctxs = cpu_to_le16(1);
6050                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6051                             bnxt_rfs_supported(bp))
6052                                 req->num_rsscos_ctxs =
6053                                         cpu_to_le16(ring_grps + 1);
6054                 }
6055                 req->num_stat_ctxs = cpu_to_le16(stats);
6056                 req->num_vnics = cpu_to_le16(vnics);
6057         }
6058         req->enables = cpu_to_le32(enables);
6059         return req;
6060 }
6061
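/* VF counterpart of __bnxt_hwrm_reserve_pf_rings(), building a
 * HWRM_FUNC_VF_CFG request.  Returns NULL if the request cannot be
 * initialized.
 */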
6062 static struct hwrm_func_vf_cfg_input *
6063 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6064                              int ring_grps, int cp_rings, int stats, int vnics)
6065 {
6066         struct hwrm_func_vf_cfg_input *req;
6067         u32 enables = 0;
6068
6069         if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6070                 return NULL;
6071
6072         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6073         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6074                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6075         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6076         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6077                 enables |= tx_rings + ring_grps ?
6078                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6079         } else {
6080                 enables |= cp_rings ?
6081                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6082                 enables |= ring_grps ?
6083                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6084         }
6085         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6086         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6087
6088         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6089         req->num_tx_rings = cpu_to_le16(tx_rings);
6090         req->num_rx_rings = cpu_to_le16(rx_rings);
6091         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6092                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6093                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6094         } else {
6095                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6096                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6097                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6098         }
6099         req->num_stat_ctxs = cpu_to_le16(stats);
6100         req->num_vnics = cpu_to_le16(vnics);
6101
6102         req->enables = cpu_to_le32(enables);
6103         return req;
6104 }
6105
6106 static int
6107 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6108                            int ring_grps, int cp_rings, int stats, int vnics)
6109 {
6110         struct hwrm_func_cfg_input *req;
6111         int rc;
6112
6113         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6114                                            cp_rings, stats, vnics);
6115         if (!req)
6116                 return -ENOMEM;
6117
6118         if (!req->enables) {
6119                 hwrm_req_drop(bp, req);
6120                 return 0;
6121         }
6122
6123         rc = hwrm_req_send(bp, req);
6124         if (rc)
6125                 return rc;
6126
6127         if (bp->hwrm_spec_code < 0x10601)
6128                 bp->hw_resc.resv_tx_rings = tx_rings;
6129
6130         return bnxt_hwrm_get_rings(bp);
6131 }
6132
6133 static int
6134 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6135                            int ring_grps, int cp_rings, int stats, int vnics)
6136 {
6137         struct hwrm_func_vf_cfg_input *req;
6138         int rc;
6139
6140         if (!BNXT_NEW_RM(bp)) {
6141                 bp->hw_resc.resv_tx_rings = tx_rings;
6142                 return 0;
6143         }
6144
6145         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6146                                            cp_rings, stats, vnics);
6147         if (!req)
6148                 return -ENOMEM;
6149
6150         rc = hwrm_req_send(bp, req);
6151         if (rc)
6152                 return rc;
6153
6154         return bnxt_hwrm_get_rings(bp);
6155 }
6156
6157 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6158                                    int cp, int stat, int vnic)
6159 {
6160         if (BNXT_PF(bp))
6161                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6162                                                   vnic);
6163         else
6164                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6165                                                   vnic);
6166 }
6167
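/* Number of NQ/MSI-X vectors in use, including any vector range set
 * aside for the ULP (RDMA) driver.
 */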
6168 int bnxt_nq_rings_in_use(struct bnxt *bp)
6169 {
6170         int cp = bp->cp_nr_rings;
6171         int ulp_msix, ulp_base;
6172
6173         ulp_msix = bnxt_get_ulp_msix_num(bp);
6174         if (ulp_msix) {
6175                 ulp_base = bnxt_get_ulp_msix_base(bp);
6176                 cp += ulp_msix;
6177                 if ((ulp_base + ulp_msix) > cp)
6178                         cp = ulp_base + ulp_msix;
6179         }
6180         return cp;
6181 }
6182
6183 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6184 {
6185         int cp;
6186
6187         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6188                 return bnxt_nq_rings_in_use(bp);
6189
6190         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6191         return cp;
6192 }
6193
6194 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6195 {
6196         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6197         int cp = bp->cp_nr_rings;
6198
6199         if (!ulp_stat)
6200                 return cp;
6201
6202         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6203                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6204
6205         return cp + ulp_stat;
6206 }
6207
6208 /* Check if a default RSS map needs to be set up.  This function is only
6209  * used on older firmware that does not require reserving RX rings.
6210  */
6211 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6212 {
6213         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6214
6215         /* The RSS map is only valid while rx_nr_rings matches resv_rx_rings */
6216         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6217                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6218                 if (!netif_is_rxfh_configured(bp->dev))
6219                         bnxt_set_dflt_rss_indir_tbl(bp);
6220         }
6221 }
6222
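/* Return true if the ring and resource counts the driver wants no
 * longer match what is reserved with firmware, meaning a new
 * reservation is needed.  On old firmware without resource management,
 * only the default RSS map is refreshed and false is returned.
 */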
6223 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6224 {
6225         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6226         int cp = bnxt_cp_rings_in_use(bp);
6227         int nq = bnxt_nq_rings_in_use(bp);
6228         int rx = bp->rx_nr_rings, stat;
6229         int vnic = 1, grp = rx;
6230
6231         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6232             bp->hwrm_spec_code >= 0x10601)
6233                 return true;
6234
6235         /* Old firmware does not need RX ring reservations, but we still
6236          * need to set up a default RSS map when required.  With new
6237          * firmware we go through RX ring reservations first and then set
6238          * up the RSS map for the successfully reserved RX rings.
6239          */
6240         if (!BNXT_NEW_RM(bp)) {
6241                 bnxt_check_rss_tbl_no_rmgr(bp);
6242                 return false;
6243         }
6244         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6245                 vnic = rx + 1;
6246         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6247                 rx <<= 1;
6248         stat = bnxt_get_func_stat_ctxs(bp);
6249         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6250             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6251             (hw_resc->resv_hw_ring_grps != grp &&
6252              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6253                 return true;
6254         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6255             hw_resc->resv_irqs != nq)
6256                 return true;
6257         return false;
6258 }
6259
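/* Reserve rings with firmware and adjust the driver's ring counts to
 * what was actually granted.  If fewer than two RX rings are granted,
 * aggregation rings (and LRO) are disabled.  Returns -ENOMEM if any
 * required resource ends up zero.
 */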
6260 static int __bnxt_reserve_rings(struct bnxt *bp)
6261 {
6262         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6263         int cp = bnxt_nq_rings_in_use(bp);
6264         int tx = bp->tx_nr_rings;
6265         int rx = bp->rx_nr_rings;
6266         int grp, rx_rings, rc;
6267         int vnic = 1, stat;
6268         bool sh = false;
6269
6270         if (!bnxt_need_reserve_rings(bp))
6271                 return 0;
6272
6273         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6274                 sh = true;
6275         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6276                 vnic = rx + 1;
6277         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6278                 rx <<= 1;
6279         grp = bp->rx_nr_rings;
6280         stat = bnxt_get_func_stat_ctxs(bp);
6281
6282         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6283         if (rc)
6284                 return rc;
6285
6286         tx = hw_resc->resv_tx_rings;
6287         if (BNXT_NEW_RM(bp)) {
6288                 rx = hw_resc->resv_rx_rings;
6289                 cp = hw_resc->resv_irqs;
6290                 grp = hw_resc->resv_hw_ring_grps;
6291                 vnic = hw_resc->resv_vnics;
6292                 stat = hw_resc->resv_stat_ctxs;
6293         }
6294
6295         rx_rings = rx;
6296         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6297                 if (rx >= 2) {
6298                         rx_rings = rx >> 1;
6299                 } else {
6300                         if (netif_running(bp->dev))
6301                                 return -ENOMEM;
6302
6303                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6304                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6305                         bp->dev->hw_features &= ~NETIF_F_LRO;
6306                         bp->dev->features &= ~NETIF_F_LRO;
6307                         bnxt_set_ring_params(bp);
6308                 }
6309         }
6310         rx_rings = min_t(int, rx_rings, grp);
6311         cp = min_t(int, cp, bp->cp_nr_rings);
6312         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6313                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6314         cp = min_t(int, cp, stat);
6315         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6316         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6317                 rx = rx_rings << 1;
6318         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6319         bp->tx_nr_rings = tx;
6320
6321         /* If we cannot reserve all the RX rings, reset the RSS map only
6322          * if absolutely necessary.
6323          */
6324         if (rx_rings != bp->rx_nr_rings) {
6325                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6326                             rx_rings, bp->rx_nr_rings);
6327                 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6328                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6329                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6330                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6331                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6332                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6333                 }
6334         }
6335         bp->rx_nr_rings = rx_rings;
6336         bp->cp_nr_rings = cp;
6337
6338         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6339                 return -ENOMEM;
6340
6341         if (!netif_is_rxfh_configured(bp->dev))
6342                 bnxt_set_dflt_rss_indir_tbl(bp);
6343
6344         return rc;
6345 }
6346
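/* Dry-run a VF ring reservation using the *_ASSETS_TEST flags, which
 * ask firmware to check availability without committing the resources.
 */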
6347 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6348                                     int ring_grps, int cp_rings, int stats,
6349                                     int vnics)
6350 {
6351         struct hwrm_func_vf_cfg_input *req;
6352         u32 flags;
6353
6354         if (!BNXT_NEW_RM(bp))
6355                 return 0;
6356
6357         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6358                                            cp_rings, stats, vnics);
        if (!req)
                return -ENOMEM;

6359         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6360                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6361                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6362                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6363                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6364                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6365         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6366                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6367
6368         req->flags = cpu_to_le32(flags);
6369         return hwrm_req_send_silent(bp, req);
6370 }
6371
6372 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6373                                     int ring_grps, int cp_rings, int stats,
6374                                     int vnics)
6375 {
6376         struct hwrm_func_cfg_input *req;
6377         u32 flags;
6378
6379         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6380                                            cp_rings, stats, vnics);
        if (!req)
                return -ENOMEM;

6381         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6382         if (BNXT_NEW_RM(bp)) {
6383                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6384                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6385                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6386                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6387                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6388                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6389                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6390                 else
6391                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6392         }
6393
6394         req->flags = cpu_to_le32(flags);
6395         return hwrm_req_send_silent(bp, req);
6396 }
6397
6398 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6399                                  int ring_grps, int cp_rings, int stats,
6400                                  int vnics)
6401 {
6402         if (bp->hwrm_spec_code < 0x10801)
6403                 return 0;
6404
6405         if (BNXT_PF(bp))
6406                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6407                                                 ring_grps, cp_rings, stats,
6408                                                 vnics);
6409
6410         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6411                                         cp_rings, stats, vnics);
6412 }
6413
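/* Query interrupt coalescing capabilities.  Legacy defaults are set
 * first so that firmware older than spec 0x10902, which lacks
 * HWRM_RING_AGGINT_QCAPS, still gets sane limits.
 */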
6414 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6415 {
6416         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6417         struct hwrm_ring_aggint_qcaps_output *resp;
6418         struct hwrm_ring_aggint_qcaps_input *req;
6419         int rc;
6420
6421         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6422         coal_cap->num_cmpl_dma_aggr_max = 63;
6423         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6424         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6425         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6426         coal_cap->int_lat_tmr_min_max = 65535;
6427         coal_cap->int_lat_tmr_max_max = 65535;
6428         coal_cap->num_cmpl_aggr_int_max = 65535;
6429         coal_cap->timer_units = 80;
6430
6431         if (bp->hwrm_spec_code < 0x10902)
6432                 return;
6433
6434         if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6435                 return;
6436
6437         resp = hwrm_req_hold(bp, req);
6438         rc = hwrm_req_send_silent(bp, req);
6439         if (!rc) {
6440                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6441                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6442                 coal_cap->num_cmpl_dma_aggr_max =
6443                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6444                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6445                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6446                 coal_cap->cmpl_aggr_dma_tmr_max =
6447                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6448                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6449                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6450                 coal_cap->int_lat_tmr_min_max =
6451                         le16_to_cpu(resp->int_lat_tmr_min_max);
6452                 coal_cap->int_lat_tmr_max_max =
6453                         le16_to_cpu(resp->int_lat_tmr_max_max);
6454                 coal_cap->num_cmpl_aggr_int_max =
6455                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6456                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6457         }
6458         hwrm_req_drop(bp, req);
6459 }
6460
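/* Convert microseconds to coalescing timer ticks; timer_units is in
 * nanoseconds per tick (80 by default).
 */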
6461 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6462 {
6463         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6464
6465         return usec * 1000 / coal_cap->timer_units;
6466 }
6467
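/* Fill a HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request from a
 * bnxt_coal configuration, clamping each value to the limits reported
 * by firmware.
 */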
6468 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6469         struct bnxt_coal *hw_coal,
6470         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6471 {
6472         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6473         u32 cmpl_params = coal_cap->cmpl_params;
6474         u16 val, tmr, max, flags = 0;
6475
6476         max = hw_coal->bufs_per_record * 128;
6477         if (hw_coal->budget)
6478                 max = hw_coal->bufs_per_record * hw_coal->budget;
6479         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6480
6481         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6482         req->num_cmpl_aggr_int = cpu_to_le16(val);
6483
6484         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6485         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6486
6487         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6488                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6489         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6490
6491         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6492         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6493         req->int_lat_tmr_max = cpu_to_le16(tmr);
6494
6495         /* min timer set to 1/2 of interrupt timer */
6496         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6497                 val = tmr / 2;
6498                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6499                 req->int_lat_tmr_min = cpu_to_le16(val);
6500                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6501         }
6502
6503         /* buf timer set to 1/4 of interrupt timer */
6504         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6505         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6506
6507         if (cmpl_params &
6508             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6509                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6510                 val = clamp_t(u16, tmr, 1,
6511                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6512                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6513                 req->enables |=
6514                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6515         }
6516
6517         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6518                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6519         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6520             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6521                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6522         req->flags = cpu_to_le16(flags);
6523         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6524 }
6525
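/* Program the NQ interrupt min timer to half the ring coalescing
 * value, if firmware supports NQ timer parameters.
 */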
6526 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6527                                    struct bnxt_coal *hw_coal)
6528 {
6529         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6530         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6531         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6532         u32 nq_params = coal_cap->nq_params;
6533         u16 tmr;
6534         int rc;
6535
6536         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6537                 return 0;
6538
6539         rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6540         if (rc)
6541                 return rc;
6542
6543         req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6544         req->flags =
6545                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6546
6547         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6548         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6549         req->int_lat_tmr_min = cpu_to_le16(tmr);
6550         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6551         return hwrm_req_send(bp, req);
6552 }
6553
6554 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6555 {
6556         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6557         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6558         struct bnxt_coal coal;
6559         int rc;
6560
6561         /* Tick values are in microseconds.
6562          * 1 coal_buf x bufs_per_record = 1 completion record.
6563          */
6564         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6565
6566         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6567         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6568
6569         if (!bnapi->rx_ring)
6570                 return -ENODEV;
6571
6572         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6573         if (rc)
6574                 return rc;
6575
6576         bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6577
6578         req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6579
6580         return hwrm_req_send(bp, req_rx);
6581 }
6582
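/* Apply the RX and TX coalescing settings to every completion ring.
 * On P5 chips, rings serving both RX and TX also get the TX parameters
 * on the TX completion ring, and the NQ timer is programmed as well.
 */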
6583 int bnxt_hwrm_set_coal(struct bnxt *bp)
6584 {
6585         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6586                                                            *req;
6587         int i, rc;
6588
6589         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6590         if (rc)
6591                 return rc;
6592
6593         rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6594         if (rc) {
6595                 hwrm_req_drop(bp, req_rx);
6596                 return rc;
6597         }
6598
6599         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6600         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6601
6602         hwrm_req_hold(bp, req_rx);
6603         hwrm_req_hold(bp, req_tx);
6604         for (i = 0; i < bp->cp_nr_rings; i++) {
6605                 struct bnxt_napi *bnapi = bp->bnapi[i];
6606                 struct bnxt_coal *hw_coal;
6607                 u16 ring_id;
6608
6609                 req = req_rx;
6610                 if (!bnapi->rx_ring) {
6611                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6612                         req = req_tx;
6613                 } else {
6614                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6615                 }
6616                 req->ring_id = cpu_to_le16(ring_id);
6617
6618                 rc = hwrm_req_send(bp, req);
6619                 if (rc)
6620                         break;
6621
6622                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6623                         continue;
6624
6625                 if (bnapi->rx_ring && bnapi->tx_ring) {
6626                         req = req_tx;
6627                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6628                         req->ring_id = cpu_to_le16(ring_id);
6629                         rc = hwrm_req_send(bp, req);
6630                         if (rc)
6631                                 break;
6632                 }
6633                 if (bnapi->rx_ring)
6634                         hw_coal = &bp->rx_coal;
6635                 else
6636                         hw_coal = &bp->tx_coal;
6637                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6638         }
6639         hwrm_req_drop(bp, req_rx);
6640         hwrm_req_drop(bp, req_tx);
6641         return rc;
6642 }
6643
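/* Free all stats contexts.  On firmware with major version 20 or
 * older, HWRM_STAT_CTX_CLR_STATS is sent first to clear the counters
 * before each free.
 */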
6644 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6645 {
6646         struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6647         struct hwrm_stat_ctx_free_input *req;
6648         int i;
6649
6650         if (!bp->bnapi)
6651                 return;
6652
6653         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6654                 return;
6655
6656         if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6657                 return;
6658         if (BNXT_FW_MAJ(bp) <= 20) {
6659                 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6660                         hwrm_req_drop(bp, req);
6661                         return;
6662                 }
6663                 hwrm_req_hold(bp, req0);
6664         }
6665         hwrm_req_hold(bp, req);
6666         for (i = 0; i < bp->cp_nr_rings; i++) {
6667                 struct bnxt_napi *bnapi = bp->bnapi[i];
6668                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6669
6670                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6671                         req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6672                         if (req0) {
6673                                 req0->stat_ctx_id = req->stat_ctx_id;
6674                                 hwrm_req_send(bp, req0);
6675                         }
6676                         hwrm_req_send(bp, req);
6677
6678                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6679                 }
6680         }
6681         hwrm_req_drop(bp, req);
6682         if (req0)
6683                 hwrm_req_drop(bp, req0);
6684 }
6685
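/* Allocate a stats context for each completion ring and record the
 * firmware IDs in the corresponding ring group.
 */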
6686 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6687 {
6688         struct hwrm_stat_ctx_alloc_output *resp;
6689         struct hwrm_stat_ctx_alloc_input *req;
6690         int rc, i;
6691
6692         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6693                 return 0;
6694
6695         rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6696         if (rc)
6697                 return rc;
6698
6699         req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6700         req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6701
6702         resp = hwrm_req_hold(bp, req);
6703         for (i = 0; i < bp->cp_nr_rings; i++) {
6704                 struct bnxt_napi *bnapi = bp->bnapi[i];
6705                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6706
6707                 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6708
6709                 rc = hwrm_req_send(bp, req);
6710                 if (rc)
6711                         break;
6712
6713                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6714
6715                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6716         }
6717         hwrm_req_drop(bp, req);
6718         return rc;
6719 }
6720
6721 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6722 {
6723         struct hwrm_func_qcfg_output *resp;
6724         struct hwrm_func_qcfg_input *req;
6725         u32 min_db_offset = 0;
6726         u16 flags;
6727         int rc;
6728
6729         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6730         if (rc)
6731                 return rc;
6732
6733         req->fid = cpu_to_le16(0xffff);
6734         resp = hwrm_req_hold(bp, req);
6735         rc = hwrm_req_send(bp, req);
6736         if (rc)
6737                 goto func_qcfg_exit;
6738
6739 #ifdef CONFIG_BNXT_SRIOV
6740         if (BNXT_VF(bp)) {
6741                 struct bnxt_vf_info *vf = &bp->vf;
6742
6743                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6744         } else {
6745                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6746         }
6747 #endif
6748         flags = le16_to_cpu(resp->flags);
6749         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6750                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6751                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6752                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6753                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6754         }
6755         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6756                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6757         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6758                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6759
6760         switch (resp->port_partition_type) {
6761         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6762         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6763         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6764                 bp->port_partition_type = resp->port_partition_type;
6765                 break;
6766         }
6767         if (bp->hwrm_spec_code < 0x10707 ||
6768             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6769                 bp->br_mode = BRIDGE_MODE_VEB;
6770         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6771                 bp->br_mode = BRIDGE_MODE_VEPA;
6772         else
6773                 bp->br_mode = BRIDGE_MODE_UNDEF;
6774
6775         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6776         if (!bp->max_mtu)
6777                 bp->max_mtu = BNXT_MAX_MTU;
6778
6779         if (bp->db_size)
6780                 goto func_qcfg_exit;
6781
6782         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6783                 if (BNXT_PF(bp))
6784                         min_db_offset = DB_PF_OFFSET_P5;
6785                 else
6786                         min_db_offset = DB_VF_OFFSET_P5;
6787         }
6788         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6789                                  1024);
6790         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6791             bp->db_size <= min_db_offset)
6792                 bp->db_size = pci_resource_len(bp->pdev, 2);
6793
6794 func_qcfg_exit:
6795         hwrm_req_drop(bp, req);
6796         return rc;
6797 }
6798
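/* Record the firmware-provided initialization value and per-type
 * offsets (reported in 4-byte units) for each backing store memory
 * type.
 */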
6799 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6800                         struct hwrm_func_backing_store_qcaps_output *resp)
6801 {
6802         struct bnxt_mem_init *mem_init;
6803         u16 init_mask;
6804         u8 init_val;
6805         u8 *offset;
6806         int i;
6807
6808         init_val = resp->ctx_kind_initializer;
6809         init_mask = le16_to_cpu(resp->ctx_init_mask);
6810         offset = &resp->qp_init_offset;
6811         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6812         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6813                 mem_init->init_val = init_val;
6814                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6815                 if (!init_mask)
6816                         continue;
6817                 if (i == BNXT_CTX_MEM_INIT_STAT)
6818                         offset = &resp->stat_init_offset;
6819                 if (init_mask & (1 << i))
6820                         mem_init->offset = *offset * 4;
6821                 else
6822                         mem_init->init_val = 0;
6823         }
6824         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6825         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6826         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6827         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6828         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6829         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6830 }
6831
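/* Query the firmware's backing store requirements and allocate and
 * populate bp->ctx with the reported entry sizes and limits.  A
 * failure of the query itself is not treated as fatal.
 */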
6832 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6833 {
6834         struct hwrm_func_backing_store_qcaps_output *resp;
6835         struct hwrm_func_backing_store_qcaps_input *req;
6836         int rc;
6837
6838         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6839                 return 0;
6840
6841         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6842         if (rc)
6843                 return rc;
6844
6845         resp = hwrm_req_hold(bp, req);
6846         rc = hwrm_req_send_silent(bp, req);
6847         if (!rc) {
6848                 struct bnxt_ctx_pg_info *ctx_pg;
6849                 struct bnxt_ctx_mem_info *ctx;
6850                 int i, tqm_rings;
6851
6852                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6853                 if (!ctx) {
6854                         rc = -ENOMEM;
6855                         goto ctx_err;
6856                 }
6857                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6858                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6859                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6860                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6861                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6862                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6863                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6864                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6865                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6866                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6867                 ctx->vnic_max_vnic_entries =
6868                         le16_to_cpu(resp->vnic_max_vnic_entries);
6869                 ctx->vnic_max_ring_table_entries =
6870                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6871                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6872                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6873                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6874                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6875                 ctx->tqm_min_entries_per_ring =
6876                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6877                 ctx->tqm_max_entries_per_ring =
6878                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6879                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6880                 if (!ctx->tqm_entries_multiple)
6881                         ctx->tqm_entries_multiple = 1;
6882                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6883                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6884                 ctx->mrav_num_entries_units =
6885                         le16_to_cpu(resp->mrav_num_entries_units);
6886                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6887                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6888
6889                 bnxt_init_ctx_initializer(ctx, resp);
6890
6891                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6892                 if (!ctx->tqm_fp_rings_count)
6893                         ctx->tqm_fp_rings_count = bp->max_q;
6894                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6895                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6896
6897                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6898                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6899                 if (!ctx_pg) {
6900                         kfree(ctx);
6901                         rc = -ENOMEM;
6902                         goto ctx_err;
6903                 }
6904                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6905                         ctx->tqm_mem[i] = ctx_pg;
6906                 bp->ctx = ctx;
6907         } else {
6908                 rc = 0;
6909         }
6910 ctx_err:
6911         hwrm_req_drop(bp, req);
6912         return rc;
6913 }
6914
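/* Encode the page size/level attribute and the page directory address
 * for one backing store region.  Level 0 points *pg_dir at the single
 * data page; levels 1 and 2 point it at the page table and set the
 * indirection level in the low bits of *pg_attr.
 */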
6915 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6916                                   __le64 *pg_dir)
6917 {
6918         if (!rmem->nr_pages)
6919                 return;
6920
6921         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6922         if (rmem->depth >= 1) {
6923                 if (rmem->depth == 2)
6924                         *pg_attr |= 2;
6925                 else
6926                         *pg_attr |= 1;
6927                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6928         } else {
6929                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6930         }
6931 }
6932
6933 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6934         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6935          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6936          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6937          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6938          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6939
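/* Hand the backing store layout to the firmware for every context type
 * selected in @enables: entry counts, entry sizes, page level and page
 * directory for each region, plus per-ring fields for each enabled TQM
 * ring.  If the extended request exceeds hwrm_max_ext_req_len, the
 * request is shortened to the legacy length.
 */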
6940 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6941 {
6942         struct hwrm_func_backing_store_cfg_input *req;
6943         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6944         struct bnxt_ctx_pg_info *ctx_pg;
6945         void **__req = (void **)&req;
6946         u32 req_len = sizeof(*req);
6947         __le32 *num_entries;
6948         __le64 *pg_dir;
6949         u32 flags = 0;
6950         u8 *pg_attr;
6951         u32 ena;
6952         int rc;
6953         int i;
6954
6955         if (!ctx)
6956                 return 0;
6957
6958         if (req_len > bp->hwrm_max_ext_req_len)
6959                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
6960         rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
6961         if (rc)
6962                 return rc;
6963
6964         req->enables = cpu_to_le32(enables);
6965         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6966                 ctx_pg = &ctx->qp_mem;
6967                 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
6968                 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6969                 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6970                 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6971                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6972                                       &req->qpc_pg_size_qpc_lvl,
6973                                       &req->qpc_page_dir);
6974         }
6975         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6976                 ctx_pg = &ctx->srq_mem;
6977                 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
6978                 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6979                 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6980                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6981                                       &req->srq_pg_size_srq_lvl,
6982                                       &req->srq_page_dir);
6983         }
6984         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6985                 ctx_pg = &ctx->cq_mem;
6986                 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
6987                 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6988                 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6989                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6990                                       &req->cq_pg_size_cq_lvl,
6991                                       &req->cq_page_dir);
6992         }
6993         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6994                 ctx_pg = &ctx->vnic_mem;
6995                 req->vnic_num_vnic_entries =
6996                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6997                 req->vnic_num_ring_table_entries =
6998                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6999                 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7000                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7001                                       &req->vnic_pg_size_vnic_lvl,
7002                                       &req->vnic_page_dir);
7003         }
7004         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7005                 ctx_pg = &ctx->stat_mem;
7006                 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7007                 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7008                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7009                                       &req->stat_pg_size_stat_lvl,
7010                                       &req->stat_page_dir);
7011         }
7012         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7013                 ctx_pg = &ctx->mrav_mem;
7014                 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7015                 if (ctx->mrav_num_entries_units)
7016                         flags |=
7017                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7018                 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7019                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7020                                       &req->mrav_pg_size_mrav_lvl,
7021                                       &req->mrav_page_dir);
7022         }
7023         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7024                 ctx_pg = &ctx->tim_mem;
7025                 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7026                 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7027                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7028                                       &req->tim_pg_size_tim_lvl,
7029                                       &req->tim_page_dir);
7030         }
7031         for (i = 0, num_entries = &req->tqm_sp_num_entries,
7032              pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7033              pg_dir = &req->tqm_sp_page_dir,
7034              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7035              i < BNXT_MAX_TQM_RINGS;
7036              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7037                 if (!(enables & ena))
7038                         continue;
7039
7040                 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7041                 ctx_pg = ctx->tqm_mem[i];
7042                 *num_entries = cpu_to_le32(ctx_pg->entries);
7043                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7044         }
7045         req->flags = cpu_to_le32(flags);
7046         return hwrm_req_send(bp, req);
7047 }
7048
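/* Allocate the pages of one backing store block through the generic
 * ring allocator.  The page and DMA address arrays live inside the
 * bnxt_ctx_pg_info itself; valid-PTE flags are always requested, and
 * full-page PTEs are used for indirect (depth >= 1) blocks.
 */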
7049 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7050                                   struct bnxt_ctx_pg_info *ctx_pg)
7051 {
7052         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7053
7054         rmem->page_size = BNXT_PAGE_SIZE;
7055         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7056         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7057         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7058         if (rmem->depth >= 1)
7059                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7060         return bnxt_alloc_ring(bp, rmem);
7061 }
7062
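/* Size and allocate one backing store region of @mem_size bytes.
 * Regions that fit in MAX_CTX_PAGES pages (and were not requested with
 * depth > 1) use a flat or single-level layout; larger regions use two
 * levels, with up to MAX_CTX_PAGES data pages under each first-level
 * table entry.  Regions larger than MAX_CTX_TOTAL_PAGES are rejected
 * with -EINVAL.
 */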
7063 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7064                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7065                                   u8 depth, struct bnxt_mem_init *mem_init)
7066 {
7067         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7068         int rc;
7069
7070         if (!mem_size)
7071                 return -EINVAL;
7072
7073         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7074         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7075                 ctx_pg->nr_pages = 0;
7076                 return -EINVAL;
7077         }
7078         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7079                 int nr_tbls, i;
7080
7081                 rmem->depth = 2;
7082                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7083                                              GFP_KERNEL);
7084                 if (!ctx_pg->ctx_pg_tbl)
7085                         return -ENOMEM;
7086                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7087                 rmem->nr_pages = nr_tbls;
7088                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7089                 if (rc)
7090                         return rc;
7091                 for (i = 0; i < nr_tbls; i++) {
7092                         struct bnxt_ctx_pg_info *pg_tbl;
7093
7094                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7095                         if (!pg_tbl)
7096                                 return -ENOMEM;
7097                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7098                         rmem = &pg_tbl->ring_mem;
7099                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7100                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7101                         rmem->depth = 1;
7102                         rmem->nr_pages = MAX_CTX_PAGES;
7103                         rmem->mem_init = mem_init;
7104                         if (i == (nr_tbls - 1)) {
7105                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7106
7107                                 if (rem)
7108                                         rmem->nr_pages = rem;
7109                         }
7110                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7111                         if (rc)
7112                                 break;
7113                 }
7114         } else {
7115                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7116                 if (rmem->nr_pages > 1 || depth)
7117                         rmem->depth = 1;
7118                 rmem->mem_init = mem_init;
7119                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7120         }
7121         return rc;
7122 }
7123
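/* Free one backing store region: any second-level page table blocks
 * allocated by bnxt_alloc_ctx_pg_tbls() first, then the region's own
 * ring memory.
 */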
7124 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7125                                   struct bnxt_ctx_pg_info *ctx_pg)
7126 {
7127         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7128
7129         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7130             ctx_pg->ctx_pg_tbl) {
7131                 int i, nr_tbls = rmem->nr_pages;
7132
7133                 for (i = 0; i < nr_tbls; i++) {
7134                         struct bnxt_ctx_pg_info *pg_tbl;
7135                         struct bnxt_ring_mem_info *rmem2;
7136
7137                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7138                         if (!pg_tbl)
7139                                 continue;
7140                         rmem2 = &pg_tbl->ring_mem;
7141                         bnxt_free_ring(bp, rmem2);
7142                         ctx_pg->ctx_pg_arr[i] = NULL;
7143                         kfree(pg_tbl);
7144                         ctx_pg->ctx_pg_tbl[i] = NULL;
7145                 }
7146                 kfree(ctx_pg->ctx_pg_tbl);
7147                 ctx_pg->ctx_pg_tbl = NULL;
7148         }
7149         bnxt_free_ring(bp, rmem);
7150         ctx_pg->nr_pages = 0;
7151 }
7152
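/* Tear down all context memory regions in reverse order of allocation.
 * ctx->tqm_mem[0] points to the start of the single block that backs
 * every TQM ring's bnxt_ctx_pg_info, so only that pointer is kfree'd.
 */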
7153 static void bnxt_free_ctx_mem(struct bnxt *bp)
7154 {
7155         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7156         int i;
7157
7158         if (!ctx)
7159                 return;
7160
7161         if (ctx->tqm_mem[0]) {
7162                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7163                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7164                 kfree(ctx->tqm_mem[0]);
7165                 ctx->tqm_mem[0] = NULL;
7166         }
7167
7168         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7169         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7170         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7171         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7172         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7173         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7174         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7175         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7176 }
7177
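/* Size each context memory region, allocate the host memory, and hand
 * the layout to the firmware.  When RoCE is supported (and this is not
 * a kdump kernel), extra QP and SRQ entries are reserved and deeper
 * page tables are used; the MR/AV and TIM regions are allocated only
 * in that case.
 */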
7178 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7179 {
7180         struct bnxt_ctx_pg_info *ctx_pg;
7181         struct bnxt_ctx_mem_info *ctx;
7182         struct bnxt_mem_init *init;
7183         u32 mem_size, ena, entries;
7184         u32 entries_sp, min;
7185         u32 num_mr, num_ah;
7186         u32 extra_srqs = 0;
7187         u32 extra_qps = 0;
7188         u8 pg_lvl = 1;
7189         int i, rc;
7190
7191         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7192         if (rc) {
7193                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7194                            rc);
7195                 return rc;
7196         }
7197         ctx = bp->ctx;
7198         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7199                 return 0;
7200
7201         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7202                 pg_lvl = 2;
7203                 extra_qps = 65536;
7204                 extra_srqs = 8192;
7205         }
7206
7207         ctx_pg = &ctx->qp_mem;
7208         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7209                           extra_qps;
7210         if (ctx->qp_entry_size) {
7211                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7212                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7213                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7214                 if (rc)
7215                         return rc;
7216         }
7217
7218         ctx_pg = &ctx->srq_mem;
7219         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7220         if (ctx->srq_entry_size) {
7221                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7222                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7223                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7224                 if (rc)
7225                         return rc;
7226         }
7227
7228         ctx_pg = &ctx->cq_mem;
7229         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7230         if (ctx->cq_entry_size) {
7231                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7232                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7233                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7234                 if (rc)
7235                         return rc;
7236         }
7237
7238         ctx_pg = &ctx->vnic_mem;
7239         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7240                           ctx->vnic_max_ring_table_entries;
7241         if (ctx->vnic_entry_size) {
7242                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7243                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7244                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7245                 if (rc)
7246                         return rc;
7247         }
7248
7249         ctx_pg = &ctx->stat_mem;
7250         ctx_pg->entries = ctx->stat_max_entries;
7251         if (ctx->stat_entry_size) {
7252                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7253                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7254                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7255                 if (rc)
7256                         return rc;
7257         }
7258
7259         ena = 0;
7260         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7261                 goto skip_rdma;
7262
7263         ctx_pg = &ctx->mrav_mem;
7264         /* 128K extra is needed to accommodate static AH context
7265          * allocation by f/w.
7266          */
7267         num_mr = 1024 * 256;
7268         num_ah = 1024 * 128;
7269         ctx_pg->entries = num_mr + num_ah;
7270         if (ctx->mrav_entry_size) {
7271                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7272                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7273                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7274                 if (rc)
7275                         return rc;
7276         }
7277         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7278         if (ctx->mrav_num_entries_units)
7279                 ctx_pg->entries =
7280                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7281                          (num_ah / ctx->mrav_num_entries_units);
7282
7283         ctx_pg = &ctx->tim_mem;
7284         ctx_pg->entries = ctx->qp_mem.entries;
7285         if (ctx->tim_entry_size) {
7286                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7287                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7288                 if (rc)
7289                         return rc;
7290         }
7291         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7292
7293 skip_rdma:
7294         min = ctx->tqm_min_entries_per_ring;
7295         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7296                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7297         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7298         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7299         entries = roundup(entries, ctx->tqm_entries_multiple);
7300         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7301         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7302                 ctx_pg = ctx->tqm_mem[i];
7303                 ctx_pg->entries = i ? entries : entries_sp;
7304                 if (ctx->tqm_entry_size) {
7305                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7306                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7307                                                     NULL);
7308                         if (rc)
7309                                 return rc;
7310                 }
7311                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7312         }
7313         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7314         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7315         if (rc) {
7316                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7317                            rc);
7318                 return rc;
7319         }
7320         ctx->flags |= BNXT_CTX_FLAG_INITED;
7321         return 0;
7322 }
7323
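/* Query the min/max resource ranges (rings, ring groups, L2 contexts,
 * VNICs, stat contexts) for this function.  With @all false, only
 * max_tx_sch_inputs is refreshed.  On P5 chips the NQ limit tracks
 * max_msix and the ring group limit follows the RX ring count; on the
 * PF, the VF reservation strategy is also recorded.
 */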
7324 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7325 {
7326         struct hwrm_func_resource_qcaps_output *resp;
7327         struct hwrm_func_resource_qcaps_input *req;
7328         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7329         int rc;
7330
7331         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7332         if (rc)
7333                 return rc;
7334
7335         req->fid = cpu_to_le16(0xffff);
7336         resp = hwrm_req_hold(bp, req);
7337         rc = hwrm_req_send_silent(bp, req);
7338         if (rc)
7339                 goto hwrm_func_resc_qcaps_exit;
7340
7341         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7342         if (!all)
7343                 goto hwrm_func_resc_qcaps_exit;
7344
7345         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7346         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7347         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7348         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7349         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7350         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7351         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7352         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7353         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7354         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7355         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7356         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7357         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7358         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7359         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7360         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7361
7362         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7363                 u16 max_msix = le16_to_cpu(resp->max_msix);
7364
7365                 hw_resc->max_nqs = max_msix;
7366                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7367         }
7368
7369         if (BNXT_PF(bp)) {
7370                 struct bnxt_pf_info *pf = &bp->pf;
7371
7372                 pf->vf_resv_strategy =
7373                         le16_to_cpu(resp->vf_reservation_strategy);
7374                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7375                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7376         }
7377 hwrm_func_resc_qcaps_exit:
7378         hwrm_req_drop(bp, req);
7379         return rc;
7380 }
7381
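/* Query the port's PTP configuration and set up bp->ptp_cfg.  The PHC
 * register pair comes from the response when partial direct access is
 * advertised, or falls back to the fixed TIMESYNC_TS0 registers on P5
 * chips; otherwise PTP is unsupported and any previously allocated
 * state is released.
 */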
7382 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7383 {
7384         struct hwrm_port_mac_ptp_qcfg_output *resp;
7385         struct hwrm_port_mac_ptp_qcfg_input *req;
7386         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7387         u8 flags;
7388         int rc;
7389
7390         if (bp->hwrm_spec_code < 0x10801) {
7391                 rc = -ENODEV;
7392                 goto no_ptp;
7393         }
7394
7395         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7396         if (rc)
7397                 goto no_ptp;
7398
7399         req->port_id = cpu_to_le16(bp->pf.port_id);
7400         resp = hwrm_req_hold(bp, req);
7401         rc = hwrm_req_send(bp, req);
7402         if (rc)
7403                 goto exit;
7404
7405         flags = resp->flags;
7406         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7407                 rc = -ENODEV;
7408                 goto exit;
7409         }
7410         if (!ptp) {
7411                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7412                 if (!ptp) {
7413                         rc = -ENOMEM;
7414                         goto exit;
7415                 }
7416                 ptp->bp = bp;
7417                 bp->ptp_cfg = ptp;
7418         }
7419         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7420                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7421                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7422         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7423                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7424                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7425         } else {
7426                 rc = -ENODEV;
7427                 goto exit;
7428         }
7429         rc = bnxt_ptp_init(bp);
7430         if (rc)
7431                 netdev_warn(bp->dev, "PTP initialization failed.\n");
7432 exit:
7433         hwrm_req_drop(bp, req);
7434         if (!rc)
7435                 return 0;
7436
7437 no_ptp:
7438         bnxt_ptp_clear(bp);
7439         kfree(ptp);
7440         bp->ptp_cfg = NULL;
7441         return rc;
7442 }
7443
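/* Query function capabilities and cache them: RoCE/WoL/PTP support,
 * firmware capability flags, absolute resource maximums, and the PF or
 * VF identity (fid, MAC address, VF ranges).  TX push mode is enabled
 * only when advertised and the firmware major version is above 217.
 */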
7444 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7445 {
7446         struct hwrm_func_qcaps_output *resp;
7447         struct hwrm_func_qcaps_input *req;
7448         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7449         u32 flags, flags_ext;
7450         int rc;
7451
7452         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7453         if (rc)
7454                 return rc;
7455
7456         req->fid = cpu_to_le16(0xffff);
7457         resp = hwrm_req_hold(bp, req);
7458         rc = hwrm_req_send(bp, req);
7459         if (rc)
7460                 goto hwrm_func_qcaps_exit;
7461
7462         flags = le32_to_cpu(resp->flags);
7463         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7464                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7465         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7466                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7467         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7468                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7469         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7470                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7471         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7472                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7473         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7474                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7475         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7476                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7477         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7478                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7479
7480         flags_ext = le32_to_cpu(resp->flags_ext);
7481         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7482                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7483         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7484                 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7485
7486         bp->tx_push_thresh = 0;
7487         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7488             BNXT_FW_MAJ(bp) > 217)
7489                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7490
7491         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7492         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7493         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7494         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7495         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7496         if (!hw_resc->max_hw_ring_grps)
7497                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7498         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7499         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7500         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7501
7502         if (BNXT_PF(bp)) {
7503                 struct bnxt_pf_info *pf = &bp->pf;
7504
7505                 pf->fw_fid = le16_to_cpu(resp->fid);
7506                 pf->port_id = le16_to_cpu(resp->port_id);
7507                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7508                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7509                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7510                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7511                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7512                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7513                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7514                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7515                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7516                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7517                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7518                         bp->flags |= BNXT_FLAG_WOL_CAP;
7519                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7520                         __bnxt_hwrm_ptp_qcfg(bp);
7521                 } else {
7522                         bnxt_ptp_clear(bp);
7523                         kfree(bp->ptp_cfg);
7524                         bp->ptp_cfg = NULL;
7525                 }
7526         } else {
7527 #ifdef CONFIG_BNXT_SRIOV
7528                 struct bnxt_vf_info *vf = &bp->vf;
7529
7530                 vf->fw_fid = le16_to_cpu(resp->fid);
7531                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7532 #endif
7533         }
7534
7535 hwrm_func_qcaps_exit:
7536         hwrm_req_drop(bp, req);
7537         return rc;
7538 }
7539
7540 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7541
7542 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7543 {
7544         int rc;
7545
7546         rc = __bnxt_hwrm_func_qcaps(bp);
7547         if (rc)
7548                 return rc;
7549         rc = bnxt_hwrm_queue_qportcfg(bp);
7550         if (rc) {
7551                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7552                 return rc;
7553         }
7554         if (bp->hwrm_spec_code >= 0x10803) {
7555                 rc = bnxt_alloc_ctx_mem(bp);
7556                 if (rc)
7557                         return rc;
7558                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7559                 if (!rc)
7560                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7561         }
7562         return 0;
7563 }
7564
7565 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7566 {
7567         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7568         struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7569         u32 flags;
7570         int rc;
7571
7572         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7573                 return 0;
7574
7575         rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7576         if (rc)
7577                 return rc;
7578
7579         resp = hwrm_req_hold(bp, req);
7580         rc = hwrm_req_send(bp, req);
7581         if (rc)
7582                 goto hwrm_cfa_adv_qcaps_exit;
7583
7584         flags = le32_to_cpu(resp->flags);
7585         if (flags &
7586             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7587                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7588
7589 hwrm_cfa_adv_qcaps_exit:
7590         hwrm_req_drop(bp, req);
7591         return rc;
7592 }
7593
7594 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7595 {
7596         if (bp->fw_health)
7597                 return 0;
7598
7599         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7600         if (!bp->fw_health)
7601                 return -ENOMEM;
7602
7603         return 0;
7604 }
7605
7606 static int bnxt_alloc_fw_health(struct bnxt *bp)
7607 {
7608         int rc;
7609
7610         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7611             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7612                 return 0;
7613
7614         rc = __bnxt_alloc_fw_health(bp);
7615         if (rc) {
7616                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7617                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7618                 return rc;
7619         }
7620
7621         return 0;
7622 }
7623
7624 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7625 {
7626         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7627                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7628                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7629 }
7630
7631 bool bnxt_is_fw_healthy(struct bnxt *bp)
7632 {
7633         if (bp->fw_health && bp->fw_health->status_reliable) {
7634                 u32 fw_status;
7635
7636                 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7637                 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7638                         return false;
7639         }
7640
7641         return true;
7642 }
7643
7644 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7645 {
7646         struct bnxt_fw_health *fw_health = bp->fw_health;
7647         u32 reg_type;
7648
7649         if (!fw_health || !fw_health->status_reliable)
7650                 return;
7651
7652         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7653         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7654                 fw_health->status_reliable = false;
7655 }
7656
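/* Try to locate the firmware health status register by direct BAR
 * access, usable even before HWRM is known to be responsive.  The
 * hcomm status structure is mapped through a GRC window and validated
 * by signature; if it is absent, P5 chips fall back to a fixed BAR0
 * status location and older chips give up.  On success the status
 * register is recorded (and pre-mapped when it is a GRC address) and
 * marked reliable.
 */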
7657 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7658 {
7659         void __iomem *hs;
7660         u32 status_loc;
7661         u32 reg_type;
7662         u32 sig;
7663
7664         if (bp->fw_health)
7665                 bp->fw_health->status_reliable = false;
7666
7667         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7668         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7669
7670         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7671         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7672                 if (!bp->chip_num) {
7673                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7674                         bp->chip_num = readl(bp->bar0 +
7675                                              BNXT_FW_HEALTH_WIN_BASE +
7676                                              BNXT_GRC_REG_CHIP_NUM);
7677                 }
7678                 if (!BNXT_CHIP_P5(bp))
7679                         return;
7680
7681                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7682                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7683         } else {
7684                 status_loc = readl(hs + offsetof(struct hcomm_status,
7685                                                  fw_status_loc));
7686         }
7687
7688         if (__bnxt_alloc_fw_health(bp)) {
7689                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7690                 return;
7691         }
7692
7693         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7694         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7695         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7696                 __bnxt_map_fw_health_reg(bp, status_loc);
7697                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7698                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7699         }
7700
7701         bp->fw_health->status_reliable = true;
7702 }
7703
7704 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7705 {
7706         struct bnxt_fw_health *fw_health = bp->fw_health;
7707         u32 reg_base = 0xffffffff;
7708         int i;
7709
7710         bp->fw_health->status_reliable = false;
7711         /* Only pre-map the monitoring GRC registers using window 3 */
7712         for (i = 0; i < 4; i++) {
7713                 u32 reg = fw_health->regs[i];
7714
7715                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7716                         continue;
7717                 if (reg_base == 0xffffffff)
7718                         reg_base = reg & BNXT_GRC_BASE_MASK;
7719                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7720                         return -ERANGE;
7721                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7722         }
7723         bp->fw_health->status_reliable = true;
7724         if (reg_base == 0xffffffff)
7725                 return 0;
7726
7727         __bnxt_map_fw_health_reg(bp, reg_base);
7728         return 0;
7729 }
7730
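/* Fetch the firmware error recovery policy: polling and wait intervals
 * (in deciseconds), the health/heartbeat/reset-count register
 * locations, and the register write sequence used to reset the chip.
 * Any failure, including an oversized reset sequence or GRC registers
 * that cannot share one mapping window, clears
 * BNXT_FW_CAP_ERROR_RECOVERY.
 */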
7731 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7732 {
7733         struct bnxt_fw_health *fw_health = bp->fw_health;
7734         struct hwrm_error_recovery_qcfg_output *resp;
7735         struct hwrm_error_recovery_qcfg_input *req;
7736         int rc, i;
7737
7738         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7739                 return 0;
7740
7741         rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7742         if (rc)
7743                 return rc;
7744
7745         resp = hwrm_req_hold(bp, req);
7746         rc = hwrm_req_send(bp, req);
7747         if (rc)
7748                 goto err_recovery_out;
7749         fw_health->flags = le32_to_cpu(resp->flags);
7750         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7751             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7752                 rc = -EINVAL;
7753                 goto err_recovery_out;
7754         }
7755         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7756         fw_health->master_func_wait_dsecs =
7757                 le32_to_cpu(resp->master_func_wait_period);
7758         fw_health->normal_func_wait_dsecs =
7759                 le32_to_cpu(resp->normal_func_wait_period);
7760         fw_health->post_reset_wait_dsecs =
7761                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7762         fw_health->post_reset_max_wait_dsecs =
7763                 le32_to_cpu(resp->max_bailout_time_after_reset);
7764         fw_health->regs[BNXT_FW_HEALTH_REG] =
7765                 le32_to_cpu(resp->fw_health_status_reg);
7766         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7767                 le32_to_cpu(resp->fw_heartbeat_reg);
7768         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7769                 le32_to_cpu(resp->fw_reset_cnt_reg);
7770         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7771                 le32_to_cpu(resp->reset_inprogress_reg);
7772         fw_health->fw_reset_inprog_reg_mask =
7773                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7774         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7775         if (fw_health->fw_reset_seq_cnt >= 16) {
7776                 rc = -EINVAL;
7777                 goto err_recovery_out;
7778         }
7779         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7780                 fw_health->fw_reset_seq_regs[i] =
7781                         le32_to_cpu(resp->reset_reg[i]);
7782                 fw_health->fw_reset_seq_vals[i] =
7783                         le32_to_cpu(resp->reset_reg_val[i]);
7784                 fw_health->fw_reset_seq_delay_msec[i] =
7785                         resp->delay_after_reset[i];
7786         }
7787 err_recovery_out:
7788         hwrm_req_drop(bp, req);
7789         if (!rc)
7790                 rc = bnxt_map_fw_health_regs(bp);
7791         if (rc)
7792                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7793         return rc;
7794 }
7795
7796 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7797 {
7798         struct hwrm_func_reset_input *req;
7799         int rc;
7800
7801         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7802         if (rc)
7803                 return rc;
7804
7805         req->enables = 0;
7806         hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7807         return hwrm_req_send(bp, req);
7808 }
7809
7810 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7811 {
7812         struct hwrm_nvm_get_dev_info_output nvm_info;
7813
7814         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7815                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7816                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7817                          nvm_info.nvm_cfg_ver_upd);
7818 }
7819
7820 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7821 {
7822         struct hwrm_queue_qportcfg_output *resp;
7823         struct hwrm_queue_qportcfg_input *req;
7824         u8 i, j, *qptr;
7825         bool no_rdma;
7826         int rc = 0;
7827
7828         rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7829         if (rc)
7830                 return rc;
7831
7832         resp = hwrm_req_hold(bp, req);
7833         rc = hwrm_req_send(bp, req);
7834         if (rc)
7835                 goto qportcfg_exit;
7836
7837         if (!resp->max_configurable_queues) {
7838                 rc = -EINVAL;
7839                 goto qportcfg_exit;
7840         }
7841         bp->max_tc = resp->max_configurable_queues;
7842         bp->max_lltc = resp->max_configurable_lossless_queues;
7843         if (bp->max_tc > BNXT_MAX_QUEUE)
7844                 bp->max_tc = BNXT_MAX_QUEUE;
7845
7846         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7847         qptr = &resp->queue_id0;
7848         for (i = 0, j = 0; i < bp->max_tc; i++) {
7849                 bp->q_info[j].queue_id = *qptr;
7850                 bp->q_ids[i] = *qptr++;
7851                 bp->q_info[j].queue_profile = *qptr++;
7852                 bp->tc_to_qidx[j] = j;
7853                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7854                     (no_rdma && BNXT_PF(bp)))
7855                         j++;
7856         }
7857         bp->max_q = bp->max_tc;
7858         bp->max_tc = max_t(u8, j, 1);
7859
7860         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7861                 bp->max_tc = 1;
7862
7863         if (bp->max_lltc > bp->max_tc)
7864                 bp->max_lltc = bp->max_tc;
7865
7866 qportcfg_exit:
7867         hwrm_req_drop(bp, req);
7868         return rc;
7869 }
7870
7871 static int bnxt_hwrm_poll(struct bnxt *bp)
7872 {
7873         struct hwrm_ver_get_input *req;
7874         int rc;
7875
7876         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7877         if (rc)
7878                 return rc;
7879
7880         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7881         req->hwrm_intf_min = HWRM_VERSION_MINOR;
7882         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7883
7884         hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7885         rc = hwrm_req_send(bp, req);
7886         return rc;
7887 }
7888
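/* Exchange HWRM versions with the firmware.  Records the interface
 * spec code, firmware and package version strings, default command
 * timeout, maximum request lengths, chip number/revision, and the
 * capability bits (short command, KONG channel, 64-bit flow handles,
 * trusted VF, advanced flow management) from dev_caps_cfg.
 */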
7889 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7890 {
7891         struct hwrm_ver_get_output *resp;
7892         struct hwrm_ver_get_input *req;
7893         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7894         u32 dev_caps_cfg, hwrm_ver;
7895         int rc, len;
7896
7897         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7898         if (rc)
7899                 return rc;
7900
7901         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
7902         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7903         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7904         req->hwrm_intf_min = HWRM_VERSION_MINOR;
7905         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7906
7907         resp = hwrm_req_hold(bp, req);
7908         rc = hwrm_req_send(bp, req);
7909         if (rc)
7910                 goto hwrm_ver_get_exit;
7911
7912         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7913
7914         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7915                              resp->hwrm_intf_min_8b << 8 |
7916                              resp->hwrm_intf_upd_8b;
7917         if (resp->hwrm_intf_maj_8b < 1) {
7918                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7919                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7920                             resp->hwrm_intf_upd_8b);
7921                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7922         }
7923
7924         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7925                         HWRM_VERSION_UPDATE;
7926
7927         if (bp->hwrm_spec_code > hwrm_ver)
7928                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7929                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7930                          HWRM_VERSION_UPDATE);
7931         else
7932                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7933                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7934                          resp->hwrm_intf_upd_8b);
7935
7936         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7937         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7938                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7939                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7940                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7941                 len = FW_VER_STR_LEN;
7942         } else {
7943                 fw_maj = resp->hwrm_fw_maj_8b;
7944                 fw_min = resp->hwrm_fw_min_8b;
7945                 fw_bld = resp->hwrm_fw_bld_8b;
7946                 fw_rsv = resp->hwrm_fw_rsvd_8b;
7947                 len = BC_HWRM_STR_LEN;
7948         }
7949         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7950         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7951                  fw_rsv);
7952
7953         if (strlen(resp->active_pkg_name)) {
7954                 int fw_ver_len = strlen(bp->fw_ver_str);
7955
7956                 snprintf(bp->fw_ver_str + fw_ver_len,
7957                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7958                          resp->active_pkg_name);
7959                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7960         }
7961
7962         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7963         if (!bp->hwrm_cmd_timeout)
7964                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7965
7966         if (resp->hwrm_intf_maj_8b >= 1) {
7967                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7968                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7969         }
7970         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7971                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7972
7973         bp->chip_num = le16_to_cpu(resp->chip_num);
7974         bp->chip_rev = resp->chip_rev;
7975         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7976             !resp->chip_metal)
7977                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7978
7979         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7980         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7981             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7982                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7983
7984         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7985                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7986
7987         if (dev_caps_cfg &
7988             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7989                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7990
7991         if (dev_caps_cfg &
7992             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7993                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7994
7995         if (dev_caps_cfg &
7996             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7997                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7998
7999 hwrm_ver_get_exit:
8000         hwrm_req_drop(bp, req);
8001         return rc;
8002 }
8003
8004 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8005 {
8006         struct hwrm_fw_set_time_input *req;
8007         struct tm tm;
8008         time64_t now = ktime_get_real_seconds();
8009         int rc;
8010
8011         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8012             bp->hwrm_spec_code < 0x10400)
8013                 return -EOPNOTSUPP;
8014
8015         time64_to_tm(now, 0, &tm);
8016         rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8017         if (rc)
8018                 return rc;
8019
8020         req->year = cpu_to_le16(1900 + tm.tm_year);
8021         req->month = 1 + tm.tm_mon;
8022         req->day = tm.tm_mday;
8023         req->hour = tm.tm_hour;
8024         req->minute = tm.tm_min;
8025         req->second = tm.tm_sec;
8026         return hwrm_req_send(bp, req);
8027 }
8028
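/* Fold one hardware counter sample into its 64-bit software mirror.
 * Only the low @mask bits come from hardware; the bits above @mask act
 * as a wrap counter.  A sample smaller than the previous low bits means
 * the hardware counter wrapped, so one full period (mask + 1) is added.
 * E.g. with mask = 0xffff, *sw = 0x1fffe and a new hw sample of 0x2,
 * the result is (0x10000 | 0x2) + 0x10000 = 0x20002.
 */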
8029 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8030 {
8031         u64 sw_tmp;
8032
8033         hw &= mask;
8034         sw_tmp = (*sw & ~mask) | hw;
8035         if (hw < (*sw & mask))
8036                 sw_tmp += mask + 1;
8037         WRITE_ONCE(*sw, sw_tmp);
8038 }
8039
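/* Accumulate an array of hardware counters into their software
 * mirrors.  A mask of -1 means the counter is already full width and
 * is copied as-is; @ignore_zero skips zero samples to work around the
 * P5 counter bug noted in bnxt_accumulate_all_stats().
 */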
8040 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8041                                     int count, bool ignore_zero)
8042 {
8043         int i;
8044
8045         for (i = 0; i < count; i++) {
8046                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8047
8048                 if (ignore_zero && !hw)
8049                         continue;
8050
8051                 if (masks[i] == -1ULL)
8052                         sw_stats[i] = hw;
8053                 else
8054                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8055         }
8056 }
8057
8058 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8059 {
8060         if (!stats->hw_stats)
8061                 return;
8062
8063         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8064                                 stats->hw_masks, stats->len / 8, false);
8065 }
8066
8067 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8068 {
8069         struct bnxt_stats_mem *ring0_stats;
8070         bool ignore_zero = false;
8071         int i;
8072
8073         /* Chip bug.  Counter intermittently becomes 0. */
8074         if (bp->flags & BNXT_FLAG_CHIP_P5)
8075                 ignore_zero = true;
8076
8077         for (i = 0; i < bp->cp_nr_rings; i++) {
8078                 struct bnxt_napi *bnapi = bp->bnapi[i];
8079                 struct bnxt_cp_ring_info *cpr;
8080                 struct bnxt_stats_mem *stats;
8081
8082                 cpr = &bnapi->cp_ring;
8083                 stats = &cpr->stats;
8084                 if (!i)
8085                         ring0_stats = stats;
8086                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8087                                         ring0_stats->hw_masks,
8088                                         ring0_stats->len / 8, ignore_zero);
8089         }
8090         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8091                 struct bnxt_stats_mem *stats = &bp->port_stats;
8092                 __le64 *hw_stats = stats->hw_stats;
8093                 u64 *sw_stats = stats->sw_stats;
8094                 u64 *masks = stats->hw_masks;
8095                 int cnt;
8096
8097                 cnt = sizeof(struct rx_port_stats) / 8;
8098                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8099
8100                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8101                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8102                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8103                 cnt = sizeof(struct tx_port_stats) / 8;
8104                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8105         }
8106         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8107                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8108                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8109         }
8110 }
8111
8112 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8113 {
8114         struct hwrm_port_qstats_input *req;
8115         struct bnxt_pf_info *pf = &bp->pf;
8116         int rc;
8117
8118         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8119                 return 0;
8120
8121         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8122                 return -EOPNOTSUPP;
8123
8124         rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8125         if (rc)
8126                 return rc;
8127
8128         req->flags = flags;
8129         req->port_id = cpu_to_le16(pf->port_id);
8130         req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8131                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8132         req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8133         return hwrm_req_send(bp, req);
8134 }
8135
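/* Collect extended port statistics and cache the returned stat sizes
 * in units of 8-byte counters.  When called without flags and the TX
 * extended stats include the per-priority PFC duration counters, the
 * priority-to-CoS-queue mapping is also refreshed via
 * HWRM_QUEUE_PRI2COS_QCFG so that per-priority counters can be
 * reported.
 */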
8136 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8137 {
8138         struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8139         struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8140         struct hwrm_port_qstats_ext_output *resp_qs;
8141         struct hwrm_port_qstats_ext_input *req_qs;
8142         struct bnxt_pf_info *pf = &bp->pf;
8143         u32 tx_stat_size;
8144         int rc;
8145
8146         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8147                 return 0;
8148
8149         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8150                 return -EOPNOTSUPP;
8151
8152         rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8153         if (rc)
8154                 return rc;
8155
8156         req_qs->flags = flags;
8157         req_qs->port_id = cpu_to_le16(pf->port_id);
8158         req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8159         req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8160         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8161                        sizeof(struct tx_port_stats_ext) : 0;
8162         req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8163         req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8164         resp_qs = hwrm_req_hold(bp, req_qs);
8165         rc = hwrm_req_send(bp, req_qs);
8166         if (!rc) {
8167                 bp->fw_rx_stats_ext_size =
8168                         le16_to_cpu(resp_qs->rx_stat_size) / 8;
8169                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8170                         le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8171         } else {
8172                 bp->fw_rx_stats_ext_size = 0;
8173                 bp->fw_tx_stats_ext_size = 0;
8174         }
8175         hwrm_req_drop(bp, req_qs);
8176
8177         if (flags)
8178                 return rc;
8179
8180         if (bp->fw_tx_stats_ext_size <=
8181             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8182                 bp->pri2cos_valid = 0;
8183                 return rc;
8184         }
8185
8186         rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8187         if (rc)
8188                 return rc;
8189
8190         req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8191
8192         resp_qc = hwrm_req_hold(bp, req_qc);
8193         rc = hwrm_req_send(bp, req_qc);
8194         if (!rc) {
8195                 u8 *pri2cos;
8196                 int i, j;
8197
8198                 pri2cos = &resp_qc->pri0_cos_queue_id;
8199                 for (i = 0; i < 8; i++) {
8200                         u8 queue_id = pri2cos[i];
8201                         u8 queue_idx;
8202
8203                         /* Per port queue IDs start from 0, 10, 20, etc */
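                        /* e.g. queue_id 23 is per-port index 3 (23 % 10) */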
8204                         queue_idx = queue_id % 10;
8205                         if (queue_idx > BNXT_MAX_QUEUE) {
8206                                 bp->pri2cos_valid = false;
8207                                 hwrm_req_drop(bp, req_qc);
8208                                 return rc;
8209                         }
8210                         for (j = 0; j < bp->max_q; j++) {
8211                                 if (bp->q_ids[j] == queue_id)
8212                                         bp->pri2cos_idx[i] = queue_idx;
8213                         }
8214                 }
8215                 bp->pri2cos_valid = true;
8216         }
8217         hwrm_req_drop(bp, req_qc);
8218
8219         return rc;
8220 }
8221
8222 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8223 {
8224         if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8225                 bnxt_hwrm_tunnel_dst_port_free(
8226                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8227         if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8228                 bnxt_hwrm_tunnel_dst_port_free(
8229                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8230 }
8231
8232 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8233 {
8234         int rc, i;
8235         u32 tpa_flags = 0;
8236
8237         if (set_tpa)
8238                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8239         else if (BNXT_NO_FW_ACCESS(bp))
8240                 return 0;
8241         for (i = 0; i < bp->nr_vnics; i++) {
8242                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8243                 if (rc) {
8244                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
8245                                    i, rc);
8246                         return rc;
8247                 }
8248         }
8249         return 0;
8250 }
8251
8252 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8253 {
8254         int i;
8255
8256         for (i = 0; i < bp->nr_vnics; i++)
8257                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8258 }
8259
8260 static void bnxt_clear_vnic(struct bnxt *bp)
8261 {
8262         if (!bp->vnic_info)
8263                 return;
8264
8265         bnxt_hwrm_clear_vnic_filter(bp);
8266         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8267                 /* clear all RSS settings before freeing the vnic contexts */
8268                 bnxt_hwrm_clear_vnic_rss(bp);
8269                 bnxt_hwrm_vnic_ctx_free(bp);
8270         }
8271         /* before freeing the vnic, undo its TPA settings */
8272         if (bp->flags & BNXT_FLAG_TPA)
8273                 bnxt_set_tpa(bp, false);
8274         bnxt_hwrm_vnic_free(bp);
8275         if (bp->flags & BNXT_FLAG_CHIP_P5)
8276                 bnxt_hwrm_vnic_ctx_free(bp);
8277 }
8278
8279 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8280                                     bool irq_re_init)
8281 {
8282         bnxt_clear_vnic(bp);
8283         bnxt_hwrm_ring_free(bp, close_path);
8284         bnxt_hwrm_ring_grp_free(bp);
8285         if (irq_re_init) {
8286                 bnxt_hwrm_stat_ctx_free(bp);
8287                 bnxt_hwrm_free_tunnel_ports(bp);
8288         }
8289 }
8290
8291 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8292 {
8293         struct hwrm_func_cfg_input *req;
8294         u8 evb_mode;
8295         int rc;
8296
8297         if (br_mode == BRIDGE_MODE_VEB)
8298                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8299         else if (br_mode == BRIDGE_MODE_VEPA)
8300                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8301         else
8302                 return -EINVAL;
8303
8304         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8305         if (rc)
8306                 return rc;
8307
8308         req->fid = cpu_to_le16(0xffff);
8309         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8310         req->evb_mode = evb_mode;
8311         return hwrm_req_send(bp, req);
8312 }
8313
8314 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8315 {
8316         struct hwrm_func_cfg_input *req;
8317         int rc;
8318
8319         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8320                 return 0;
8321
8322         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8323         if (rc)
8324                 return rc;
8325
8326         req->fid = cpu_to_le16(0xffff);
8327         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8328         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8329         if (size == 128)
8330                 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8331
8332         return hwrm_req_send(bp, req);
8333 }
8334
8335 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8336 {
8337         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8338         int rc;
8339
8340         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8341                 goto skip_rss_ctx;
8342
8343         /* allocate context for vnic */
8344         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8345         if (rc) {
8346                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8347                            vnic_id, rc);
8348                 goto vnic_setup_err;
8349         }
8350         bp->rsscos_nr_ctxs++;
8351
8352         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8353                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8354                 if (rc) {
8355                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8356                                    vnic_id, rc);
8357                         goto vnic_setup_err;
8358                 }
8359                 bp->rsscos_nr_ctxs++;
8360         }
8361
8362 skip_rss_ctx:
8363         /* configure default vnic, ring grp */
8364         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8365         if (rc) {
8366                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8367                            vnic_id, rc);
8368                 goto vnic_setup_err;
8369         }
8370
8371         /* Enable RSS hashing on vnic */
8372         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8373         if (rc) {
8374                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8375                            vnic_id, rc);
8376                 goto vnic_setup_err;
8377         }
8378
8379         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8380                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8381                 if (rc) {
8382                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8383                                    vnic_id, rc);
8384                 }
8385         }
8386
8387 vnic_setup_err:
8388         return rc;
8389 }
8390
8391 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8392 {
8393         int rc, i, nr_ctxs;
8394
8395         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8396         for (i = 0; i < nr_ctxs; i++) {
8397                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8398                 if (rc) {
8399                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8400                                    vnic_id, i, rc);
8401                         break;
8402                 }
8403                 bp->rsscos_nr_ctxs++;
8404         }
8405         if (i < nr_ctxs)
8406                 return -ENOMEM;
8407
8408         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8409         if (rc) {
8410                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8411                            vnic_id, rc);
8412                 return rc;
8413         }
8414         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8415         if (rc) {
8416                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8417                            vnic_id, rc);
8418                 return rc;
8419         }
8420         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8421                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8422                 if (rc) {
8423                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8424                                    vnic_id, rc);
8425                 }
8426         }
8427         return rc;
8428 }
8429
8430 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8431 {
8432         if (bp->flags & BNXT_FLAG_CHIP_P5)
8433                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8434         else
8435                 return __bnxt_setup_vnic(bp, vnic_id);
8436 }
8437
8438 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8439 {
8440 #ifdef CONFIG_RFS_ACCEL
8441         int i, rc = 0;
8442
8443         if (bp->flags & BNXT_FLAG_CHIP_P5)
8444                 return 0;
8445
8446         for (i = 0; i < bp->rx_nr_rings; i++) {
8447                 struct bnxt_vnic_info *vnic;
8448                 u16 vnic_id = i + 1;
8449                 u16 ring_id = i;
8450
8451                 if (vnic_id >= bp->nr_vnics)
8452                         break;
8453
8454                 vnic = &bp->vnic_info[vnic_id];
8455                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8456                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8457                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8458                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8459                 if (rc) {
8460                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8461                                    vnic_id, rc);
8462                         break;
8463                 }
8464                 rc = bnxt_setup_vnic(bp, vnic_id);
8465                 if (rc)
8466                         break;
8467         }
8468         return rc;
8469 #else
8470         return 0;
8471 #endif
8472 }
8473
8474 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8475 static bool bnxt_promisc_ok(struct bnxt *bp)
8476 {
8477 #ifdef CONFIG_BNXT_SRIOV
8478         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8479                 return false;
8480 #endif
8481         return true;
8482 }
8483
8484 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8485 {
8486         int rc;
8487
8488         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8489         if (rc) {
8490                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8491                            rc);
8492                 return rc;
8493         }
8494
8495         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8496         if (rc) {
8497                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8498                            rc);
8499                 return rc;
8500         }
8501         return rc;
8502 }
8503
8504 static int bnxt_cfg_rx_mode(struct bnxt *);
8505 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8506
8507 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8508 {
8509         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8510         int rc = 0;
8511         unsigned int rx_nr_rings = bp->rx_nr_rings;
8512
8513         if (irq_re_init) {
8514                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8515                 if (rc) {
8516                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8517                                    rc);
8518                         goto err_out;
8519                 }
8520         }
8521
8522         rc = bnxt_hwrm_ring_alloc(bp);
8523         if (rc) {
8524                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8525                 goto err_out;
8526         }
8527
8528         rc = bnxt_hwrm_ring_grp_alloc(bp);
8529         if (rc) {
8530                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8531                 goto err_out;
8532         }
8533
8534         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8535                 rx_nr_rings--;
8536
8537         /* default vnic 0 */
8538         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8539         if (rc) {
8540                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8541                 goto err_out;
8542         }
8543
8544         rc = bnxt_setup_vnic(bp, 0);
8545         if (rc)
8546                 goto err_out;
8547
8548         if (bp->flags & BNXT_FLAG_RFS) {
8549                 rc = bnxt_alloc_rfs_vnics(bp);
8550                 if (rc)
8551                         goto err_out;
8552         }
8553
8554         if (bp->flags & BNXT_FLAG_TPA) {
8555                 rc = bnxt_set_tpa(bp, true);
8556                 if (rc)
8557                         goto err_out;
8558         }
8559
8560         if (BNXT_VF(bp))
8561                 bnxt_update_vf_mac(bp);
8562
8563         /* Filter for default vnic 0 */
8564         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8565         if (rc) {
8566                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8567                 goto err_out;
8568         }
8569         vnic->uc_filter_count = 1;
8570
8571         vnic->rx_mask = 0;
8572         if (bp->dev->flags & IFF_BROADCAST)
8573                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8574
8575         if (bp->dev->flags & IFF_PROMISC)
8576                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8577
8578         if (bp->dev->flags & IFF_ALLMULTI) {
8579                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8580                 vnic->mc_list_count = 0;
8581         } else {
8582                 u32 mask = 0;
8583
8584                 bnxt_mc_list_updated(bp, &mask);
8585                 vnic->rx_mask |= mask;
8586         }
8587
8588         rc = bnxt_cfg_rx_mode(bp);
8589         if (rc)
8590                 goto err_out;
8591
8592         rc = bnxt_hwrm_set_coal(bp);
8593         if (rc)
8594                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8595                                 rc);
8596
8597         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8598                 rc = bnxt_setup_nitroa0_vnic(bp);
8599                 if (rc)
8600                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8601                                    rc);
8602         }
8603
8604         if (BNXT_VF(bp)) {
8605                 bnxt_hwrm_func_qcfg(bp);
8606                 netdev_update_features(bp->dev);
8607         }
8608
8609         return 0;
8610
8611 err_out:
8612         bnxt_hwrm_resource_free(bp, 0, true);
8613
8614         return rc;
8615 }
8616
8617 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8618 {
8619         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8620         return 0;
8621 }
8622
8623 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8624 {
8625         bnxt_init_cp_rings(bp);
8626         bnxt_init_rx_rings(bp);
8627         bnxt_init_tx_rings(bp);
8628         bnxt_init_ring_grps(bp, irq_re_init);
8629         bnxt_init_vnics(bp);
8630
8631         return bnxt_init_chip(bp, irq_re_init);
8632 }
8633
8634 static int bnxt_set_real_num_queues(struct bnxt *bp)
8635 {
8636         int rc;
8637         struct net_device *dev = bp->dev;
8638
8639         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8640                                           bp->tx_nr_rings_xdp);
8641         if (rc)
8642                 return rc;
8643
8644         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8645         if (rc)
8646                 return rc;
8647
8648 #ifdef CONFIG_RFS_ACCEL
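        /* alloc_irq_cpu_rmap() sets up the CPU-to-queue reverse map that
         * accelerated RFS uses to steer flows to the RX ring whose IRQ is
         * affine to the consuming CPU.
         */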
8649         if (bp->flags & BNXT_FLAG_RFS)
8650                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8651 #endif
8652
8653         return rc;
8654 }
8655
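/* Fit the requested RX/TX ring counts into @max completion rings.  With
 * shared rings one completion ring serves an RX/TX pair, so each count is
 * simply capped at @max.  Otherwise rx + tx must not exceed @max, and the
 * larger side is decremented first; e.g. rx = 6, tx = 4, max = 8 trims to
 * rx = 4, tx = 4.
 */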
8656 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8657                            bool shared)
8658 {
8659         int _rx = *rx, _tx = *tx;
8660
8661         if (shared) {
8662                 *rx = min_t(int, _rx, max);
8663                 *tx = min_t(int, _tx, max);
8664         } else {
8665                 if (max < 2)
8666                         return -ENOMEM;
8667
8668                 while (_rx + _tx > max) {
8669                         if (_rx > _tx && _rx > 1)
8670                                 _rx--;
8671                         else if (_tx > 1)
8672                                 _tx--;
8673                 }
8674                 *rx = _rx;
8675                 *tx = _tx;
8676         }
8677         return 0;
8678 }
8679
8680 static void bnxt_setup_msix(struct bnxt *bp)
8681 {
8682         const int len = sizeof(bp->irq_tbl[0].name);
8683         struct net_device *dev = bp->dev;
8684         int tcs, i;
8685
8686         tcs = netdev_get_num_tc(dev);
8687         if (tcs) {
8688                 int i, off, count;
8689
8690                 for (i = 0; i < tcs; i++) {
8691                         count = bp->tx_nr_rings_per_tc;
8692                         off = i * count;
8693                         netdev_set_tc_queue(dev, i, count, off);
8694                 }
8695         }
8696
8697         for (i = 0; i < bp->cp_nr_rings; i++) {
8698                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8699                 char *attr;
8700
8701                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8702                         attr = "TxRx";
8703                 else if (i < bp->rx_nr_rings)
8704                         attr = "rx";
8705                 else
8706                         attr = "tx";
8707
8708                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8709                          attr, i);
8710                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8711         }
8712 }
8713
8714 static void bnxt_setup_inta(struct bnxt *bp)
8715 {
8716         const int len = sizeof(bp->irq_tbl[0].name);
8717
8718         if (netdev_get_num_tc(bp->dev))
8719                 netdev_reset_tc(bp->dev);
8720
8721         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8722                  0);
8723         bp->irq_tbl[0].handler = bnxt_inta;
8724 }
8725
8726 static int bnxt_init_int_mode(struct bnxt *bp);
8727
8728 static int bnxt_setup_int_mode(struct bnxt *bp)
8729 {
8730         int rc;
8731
8732         if (!bp->irq_tbl) {
8733                 rc = bnxt_init_int_mode(bp);
8734                 if (rc || !bp->irq_tbl)
8735                         return rc ?: -ENODEV;
8736         }
8737
8738         if (bp->flags & BNXT_FLAG_USING_MSIX)
8739                 bnxt_setup_msix(bp);
8740         else
8741                 bnxt_setup_inta(bp);
8742
8743         rc = bnxt_set_real_num_queues(bp);
8744         return rc;
8745 }
8746
8747 #ifdef CONFIG_RFS_ACCEL
8748 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8749 {
8750         return bp->hw_resc.max_rsscos_ctxs;
8751 }
8752
8753 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8754 {
8755         return bp->hw_resc.max_vnics;
8756 }
8757 #endif
8758
8759 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8760 {
8761         return bp->hw_resc.max_stat_ctxs;
8762 }
8763
8764 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8765 {
8766         return bp->hw_resc.max_cp_rings;
8767 }
8768
8769 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8770 {
8771         unsigned int cp = bp->hw_resc.max_cp_rings;
8772
8773         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8774                 cp -= bnxt_get_ulp_msix_num(bp);
8775
8776         return cp;
8777 }
8778
8779 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8780 {
8781         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8782
8783         if (bp->flags & BNXT_FLAG_CHIP_P5)
8784                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8785
8786         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8787 }
8788
8789 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8790 {
8791         bp->hw_resc.max_irqs = max_irqs;
8792 }
8793
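/* On P5 chips every RX and TX ring consumes its own hardware completion
 * ring, so the rings already in use are rx_nr_rings + tx_nr_rings; older
 * chips consume one completion ring per NAPI (cp_nr_rings).
 */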
8794 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8795 {
8796         unsigned int cp;
8797
8798         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8799         if (bp->flags & BNXT_FLAG_CHIP_P5)
8800                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8801         else
8802                 return cp - bp->cp_nr_rings;
8803 }
8804
8805 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8806 {
8807         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8808 }
8809
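/* Estimate how many of the @num extra MSI-X vectors requested (e.g. by a
 * ULP such as RDMA) can be made available, given the vectors left over
 * after the L2 rings; under the new resource manager the answer is further
 * bounded by the function's maximum IRQs.
 */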
8810 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8811 {
8812         int max_cp = bnxt_get_max_func_cp_rings(bp);
8813         int max_irq = bnxt_get_max_func_irqs(bp);
8814         int total_req = bp->cp_nr_rings + num;
8815         int max_idx, avail_msix;
8816
8817         max_idx = bp->total_irqs;
8818         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8819                 max_idx = min_t(int, bp->total_irqs, max_cp);
8820         avail_msix = max_idx - bp->cp_nr_rings;
8821         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8822                 return avail_msix;
8823
8824         if (max_irq < total_req) {
8825                 num = max_irq - bp->cp_nr_rings;
8826                 if (num <= 0)
8827                         return 0;
8828         }
8829         return num;
8830 }
8831
8832 static int bnxt_get_num_msix(struct bnxt *bp)
8833 {
8834         if (!BNXT_NEW_RM(bp))
8835                 return bnxt_get_max_func_irqs(bp);
8836
8837         return bnxt_nq_rings_in_use(bp);
8838 }
8839
8840 static int bnxt_init_msix(struct bnxt *bp)
8841 {
8842         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8843         struct msix_entry *msix_ent;
8844
8845         total_vecs = bnxt_get_num_msix(bp);
8846         max = bnxt_get_max_func_irqs(bp);
8847         if (total_vecs > max)
8848                 total_vecs = max;
8849
8850         if (!total_vecs)
8851                 return 0;
8852
8853         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8854         if (!msix_ent)
8855                 return -ENOMEM;
8856
8857         for (i = 0; i < total_vecs; i++) {
8858                 msix_ent[i].entry = i;
8859                 msix_ent[i].vector = 0;
8860         }
8861
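        /* With separate RX and TX rings at least two vectors are needed */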
8862         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8863                 min = 2;
8864
8865         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8866         ulp_msix = bnxt_get_ulp_msix_num(bp);
8867         if (total_vecs < 0 || total_vecs < ulp_msix) {
8868                 rc = -ENODEV;
8869                 goto msix_setup_exit;
8870         }
8871
8872         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8873         if (bp->irq_tbl) {
8874                 for (i = 0; i < total_vecs; i++)
8875                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8876
8877                 bp->total_irqs = total_vecs;
8878                 /* Trim rings based on the number of vectors allocated */
8879                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8880                                      total_vecs - ulp_msix, min == 1);
8881                 if (rc)
8882                         goto msix_setup_exit;
8883
8884                 bp->cp_nr_rings = (min == 1) ?
8885                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8886                                   bp->tx_nr_rings + bp->rx_nr_rings;
8887
8888         } else {
8889                 rc = -ENOMEM;
8890                 goto msix_setup_exit;
8891         }
8892         bp->flags |= BNXT_FLAG_USING_MSIX;
8893         kfree(msix_ent);
8894         return 0;
8895
8896 msix_setup_exit:
8897         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8898         kfree(bp->irq_tbl);
8899         bp->irq_tbl = NULL;
8900         pci_disable_msix(bp->pdev);
8901         kfree(msix_ent);
8902         return rc;
8903 }
8904
8905 static int bnxt_init_inta(struct bnxt *bp)
8906 {
8907         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8908         if (!bp->irq_tbl)
8909                 return -ENOMEM;
8910
8911         bp->total_irqs = 1;
8912         bp->rx_nr_rings = 1;
8913         bp->tx_nr_rings = 1;
8914         bp->cp_nr_rings = 1;
8915         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8916         bp->irq_tbl[0].vector = bp->pdev->irq;
8917         return 0;
8918 }
8919
8920 static int bnxt_init_int_mode(struct bnxt *bp)
8921 {
8922         int rc = -ENODEV;
8923
8924         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8925                 rc = bnxt_init_msix(bp);
8926
8927         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8928                 /* fallback to INTA */
8929                 rc = bnxt_init_inta(bp);
8930         }
8931         return rc;
8932 }
8933
8934 static void bnxt_clear_int_mode(struct bnxt *bp)
8935 {
8936         if (bp->flags & BNXT_FLAG_USING_MSIX)
8937                 pci_disable_msix(bp->pdev);
8938
8939         kfree(bp->irq_tbl);
8940         bp->irq_tbl = NULL;
8941         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8942 }
8943
8944 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8945 {
8946         int tcs = netdev_get_num_tc(bp->dev);
8947         bool irq_cleared = false;
8948         int rc;
8949
8950         if (!bnxt_need_reserve_rings(bp))
8951                 return 0;
8952
8953         if (irq_re_init && BNXT_NEW_RM(bp) &&
8954             bnxt_get_num_msix(bp) != bp->total_irqs) {
8955                 bnxt_ulp_irq_stop(bp);
8956                 bnxt_clear_int_mode(bp);
8957                 irq_cleared = true;
8958         }
8959         rc = __bnxt_reserve_rings(bp);
8960         if (irq_cleared) {
8961                 if (!rc)
8962                         rc = bnxt_init_int_mode(bp);
8963                 bnxt_ulp_irq_restart(bp, rc);
8964         }
8965         if (rc) {
8966                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8967                 return rc;
8968         }
8969         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8970                 netdev_err(bp->dev, "tx ring reservation failure\n");
8971                 netdev_reset_tc(bp->dev);
8972                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8973                 return -ENOMEM;
8974         }
8975         return 0;
8976 }
8977
8978 static void bnxt_free_irq(struct bnxt *bp)
8979 {
8980         struct bnxt_irq *irq;
8981         int i;
8982
8983 #ifdef CONFIG_RFS_ACCEL
8984         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8985         bp->dev->rx_cpu_rmap = NULL;
8986 #endif
8987         if (!bp->irq_tbl || !bp->bnapi)
8988                 return;
8989
8990         for (i = 0; i < bp->cp_nr_rings; i++) {
8991                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8992
8993                 irq = &bp->irq_tbl[map_idx];
8994                 if (irq->requested) {
8995                         if (irq->have_cpumask) {
8996                                 irq_set_affinity_hint(irq->vector, NULL);
8997                                 free_cpumask_var(irq->cpu_mask);
8998                                 irq->have_cpumask = 0;
8999                         }
9000                         free_irq(irq->vector, bp->bnapi[i]);
9001                 }
9002
9003                 irq->requested = 0;
9004         }
9005 }
9006
9007 static int bnxt_request_irq(struct bnxt *bp)
9008 {
9009         int i, j, rc = 0;
9010         unsigned long flags = 0;
9011 #ifdef CONFIG_RFS_ACCEL
9012         struct cpu_rmap *rmap;
9013 #endif
9014
9015         rc = bnxt_setup_int_mode(bp);
9016         if (rc) {
9017                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9018                            rc);
9019                 return rc;
9020         }
9021 #ifdef CONFIG_RFS_ACCEL
9022         rmap = bp->dev->rx_cpu_rmap;
9023 #endif
9024         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9025                 flags = IRQF_SHARED;
9026
9027         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9028                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9029                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9030
9031 #ifdef CONFIG_RFS_ACCEL
9032                 if (rmap && bp->bnapi[i]->rx_ring) {
9033                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9034                         if (rc)
9035                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9036                                             j);
9037                         j++;
9038                 }
9039 #endif
9040                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9041                                  bp->bnapi[i]);
9042                 if (rc)
9043                         break;
9044
9045                 irq->requested = 1;
9046
9047                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9048                         int numa_node = dev_to_node(&bp->pdev->dev);
9049
9050                         irq->have_cpumask = 1;
9051                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9052                                         irq->cpu_mask);
9053                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9054                         if (rc) {
9055                                 netdev_warn(bp->dev,
9056                                             "Set affinity failed, IRQ = %d\n",
9057                                             irq->vector);
9058                                 break;
9059                         }
9060                 }
9061         }
9062         return rc;
9063 }
9064
9065 static void bnxt_del_napi(struct bnxt *bp)
9066 {
9067         int i;
9068
9069         if (!bp->bnapi)
9070                 return;
9071
9072         for (i = 0; i < bp->cp_nr_rings; i++) {
9073                 struct bnxt_napi *bnapi = bp->bnapi[i];
9074
9075                 __netif_napi_del(&bnapi->napi);
9076         }
9077         /* We called __netif_napi_del(), so we must respect an RCU
9078          * grace period before the napi structures are freed.
9079          */
9080         synchronize_net();
9081 }
9082
9083 static void bnxt_init_napi(struct bnxt *bp)
9084 {
9085         int i;
9086         unsigned int cp_nr_rings = bp->cp_nr_rings;
9087         struct bnxt_napi *bnapi;
9088
9089         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9090                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9091
9092                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9093                         poll_fn = bnxt_poll_p5;
9094                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9095                         cp_nr_rings--;
9096                 for (i = 0; i < cp_nr_rings; i++) {
9097                         bnapi = bp->bnapi[i];
9098                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9099                 }
9100                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9101                         bnapi = bp->bnapi[cp_nr_rings];
9102                         netif_napi_add(bp->dev, &bnapi->napi,
9103                                        bnxt_poll_nitroa0, 64);
9104                 }
9105         } else {
9106                 bnapi = bp->bnapi[0];
9107                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9108         }
9109 }
9110
9111 static void bnxt_disable_napi(struct bnxt *bp)
9112 {
9113         int i;
9114
9115         if (!bp->bnapi ||
9116             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9117                 return;
9118
9119         for (i = 0; i < bp->cp_nr_rings; i++) {
9120                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9121
9122                 napi_disable(&bp->bnapi[i]->napi);
9123                 if (bp->bnapi[i]->rx_ring)
9124                         cancel_work_sync(&cpr->dim.work);
9125         }
9126 }
9127
9128 static void bnxt_enable_napi(struct bnxt *bp)
9129 {
9130         int i;
9131
9132         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9133         for (i = 0; i < bp->cp_nr_rings; i++) {
9134                 struct bnxt_napi *bnapi = bp->bnapi[i];
9135                 struct bnxt_cp_ring_info *cpr;
9136
9137                 cpr = &bnapi->cp_ring;
9138                 if (bnapi->in_reset)
9139                         cpr->sw_stats.rx.rx_resets++;
9140                 bnapi->in_reset = false;
9141
9142                 if (bnapi->rx_ring) {
9143                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9144                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9145                 }
9146                 napi_enable(&bnapi->napi);
9147         }
9148 }
9149
9150 void bnxt_tx_disable(struct bnxt *bp)
9151 {
9152         int i;
9153         struct bnxt_tx_ring_info *txr;
9154
9155         if (bp->tx_ring) {
9156                 for (i = 0; i < bp->tx_nr_rings; i++) {
9157                         txr = &bp->tx_ring[i];
9158                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9159                 }
9160         }
9161         /* Make sure napi polls see @dev_state change */
9162         synchronize_net();
9163         /* Drop carrier first to prevent TX timeout */
9164         netif_carrier_off(bp->dev);
9165         /* Stop all TX queues */
9166         netif_tx_disable(bp->dev);
9167 }
9168
9169 void bnxt_tx_enable(struct bnxt *bp)
9170 {
9171         int i;
9172         struct bnxt_tx_ring_info *txr;
9173
9174         for (i = 0; i < bp->tx_nr_rings; i++) {
9175                 txr = &bp->tx_ring[i];
9176                 WRITE_ONCE(txr->dev_state, 0);
9177         }
9178         /* Make sure napi polls see @dev_state change */
9179         synchronize_net();
9180         netif_tx_wake_all_queues(bp->dev);
9181         if (bp->link_info.link_up)
9182                 netif_carrier_on(bp->dev);
9183 }
9184
9185 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9186 {
9187         u8 active_fec = link_info->active_fec_sig_mode &
9188                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9189
9190         switch (active_fec) {
9191         default:
9192         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9193                 return "None";
9194         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9195                 return "Clause 74 BaseR";
9196         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9197                 return "Clause 91 RS(528,514)";
9198         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9199                 return "Clause 91 RS544_1XN";
9200         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9201                 return "Clause 91 RS(544,514)";
9202         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9203                 return "Clause 91 RS272_1XN";
9204         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9205                 return "Clause 91 RS(272,257)";
9206         }
9207 }
9208
9209 static void bnxt_report_link(struct bnxt *bp)
9210 {
9211         if (bp->link_info.link_up) {
9212                 const char *signal = "";
9213                 const char *flow_ctrl;
9214                 const char *duplex;
9215                 u32 speed;
9216                 u16 fec;
9217
9218                 netif_carrier_on(bp->dev);
9219                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9220                 if (speed == SPEED_UNKNOWN) {
9221                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9222                         return;
9223                 }
9224                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9225                         duplex = "full";
9226                 else
9227                         duplex = "half";
9228                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9229                         flow_ctrl = "ON - receive & transmit";
9230                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9231                         flow_ctrl = "ON - transmit";
9232                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9233                         flow_ctrl = "ON - receive";
9234                 else
9235                         flow_ctrl = "none";
9236                 if (bp->link_info.phy_qcfg_resp.option_flags &
9237                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9238                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9239                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9240                         switch (sig_mode) {
9241                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9242                                 signal = "(NRZ) ";
9243                                 break;
9244                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9245                                 signal = "(PAM4) ";
9246                                 break;
9247                         default:
9248                                 break;
9249                         }
9250                 }
9251                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9252                             speed, signal, duplex, flow_ctrl);
9253                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9254                         netdev_info(bp->dev, "EEE is %s\n",
9255                                     bp->eee.eee_active ? "active" :
9256                                                          "not active");
9257                 fec = bp->link_info.fec_cfg;
9258                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9259                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9260                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9261                                     bnxt_report_fec(&bp->link_info));
9262         } else {
9263                 netif_carrier_off(bp->dev);
9264                 netdev_err(bp->dev, "NIC Link is Down\n");
9265         }
9266 }
9267
9268 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9269 {
9270         if (!resp->supported_speeds_auto_mode &&
9271             !resp->supported_speeds_force_mode &&
9272             !resp->supported_pam4_speeds_auto_mode &&
9273             !resp->supported_pam4_speeds_force_mode)
9274                 return true;
9275         return false;
9276 }
9277
9278 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9279 {
9280         struct bnxt_link_info *link_info = &bp->link_info;
9281         struct hwrm_port_phy_qcaps_output *resp;
9282         struct hwrm_port_phy_qcaps_input *req;
9283         int rc = 0;
9284
9285         if (bp->hwrm_spec_code < 0x10201)
9286                 return 0;
9287
9288         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9289         if (rc)
9290                 return rc;
9291
9292         resp = hwrm_req_hold(bp, req);
9293         rc = hwrm_req_send(bp, req);
9294         if (rc)
9295                 goto hwrm_phy_qcaps_exit;
9296
9297         bp->phy_flags = resp->flags;
9298         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9299                 struct ethtool_eee *eee = &bp->eee;
9300                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9301
9302                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9303                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9304                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9305                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9306                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9307         }
9308
9309         if (bp->hwrm_spec_code >= 0x10a01) {
9310                 if (bnxt_phy_qcaps_no_speed(resp)) {
9311                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9312                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9313                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9314                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9315                         netdev_info(bp->dev, "Ethernet link enabled\n");
9316                         /* Phy re-enabled, reprobe the speeds */
9317                         link_info->support_auto_speeds = 0;
9318                         link_info->support_pam4_auto_speeds = 0;
9319                 }
9320         }
9321         if (resp->supported_speeds_auto_mode)
9322                 link_info->support_auto_speeds =
9323                         le16_to_cpu(resp->supported_speeds_auto_mode);
9324         if (resp->supported_pam4_speeds_auto_mode)
9325                 link_info->support_pam4_auto_speeds =
9326                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9327
9328         bp->port_count = resp->port_cnt;
9329
9330 hwrm_phy_qcaps_exit:
9331         hwrm_req_drop(bp, req);
9332         return rc;
9333 }
9334
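/* Return true if @advertising contains any speed bit that has dropped out
 * of @supported; e.g. advertising = 0x5, supported = 0x4 gives diff = 0x1
 * and (supported | diff) != supported.
 */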
9335 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9336 {
9337         u16 diff = advertising ^ supported;
9338
9339         return ((supported | diff) != supported);
9340 }
9341
9342 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9343 {
9344         struct bnxt_link_info *link_info = &bp->link_info;
9345         struct hwrm_port_phy_qcfg_output *resp;
9346         struct hwrm_port_phy_qcfg_input *req;
9347         u8 link_up = link_info->link_up;
9348         bool support_changed = false;
9349         int rc;
9350
9351         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9352         if (rc)
9353                 return rc;
9354
9355         resp = hwrm_req_hold(bp, req);
9356         rc = hwrm_req_send(bp, req);
9357         if (rc) {
9358                 hwrm_req_drop(bp, req);
9359                 return rc;
9360         }
9361
9362         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9363         link_info->phy_link_status = resp->link;
9364         link_info->duplex = resp->duplex_cfg;
9365         if (bp->hwrm_spec_code >= 0x10800)
9366                 link_info->duplex = resp->duplex_state;
9367         link_info->pause = resp->pause;
9368         link_info->auto_mode = resp->auto_mode;
9369         link_info->auto_pause_setting = resp->auto_pause;
9370         link_info->lp_pause = resp->link_partner_adv_pause;
9371         link_info->force_pause_setting = resp->force_pause;
9372         link_info->duplex_setting = resp->duplex_cfg;
9373         if (link_info->phy_link_status == BNXT_LINK_LINK)
9374                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9375         else
9376                 link_info->link_speed = 0;
9377         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9378         link_info->force_pam4_link_speed =
9379                 le16_to_cpu(resp->force_pam4_link_speed);
9380         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9381         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9382         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9383         link_info->auto_pam4_link_speeds =
9384                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9385         link_info->lp_auto_link_speeds =
9386                 le16_to_cpu(resp->link_partner_adv_speeds);
9387         link_info->lp_auto_pam4_link_speeds =
9388                 resp->link_partner_pam4_adv_speeds;
9389         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9390         link_info->phy_ver[0] = resp->phy_maj;
9391         link_info->phy_ver[1] = resp->phy_min;
9392         link_info->phy_ver[2] = resp->phy_bld;
9393         link_info->media_type = resp->media_type;
9394         link_info->phy_type = resp->phy_type;
9395         link_info->transceiver = resp->xcvr_pkg_type;
9396         link_info->phy_addr = resp->eee_config_phy_addr &
9397                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9398         link_info->module_status = resp->module_status;
9399
9400         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9401                 struct ethtool_eee *eee = &bp->eee;
9402                 u16 fw_speeds;
9403
9404                 eee->eee_active = 0;
9405                 if (resp->eee_config_phy_addr &
9406                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9407                         eee->eee_active = 1;
9408                         fw_speeds = le16_to_cpu(
9409                                 resp->link_partner_adv_eee_link_speed_mask);
9410                         eee->lp_advertised =
9411                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9412                 }
9413
9414                 /* Pull initial EEE config */
9415                 if (!chng_link_state) {
9416                         if (resp->eee_config_phy_addr &
9417                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9418                                 eee->eee_enabled = 1;
9419
9420                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9421                         eee->advertised =
9422                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9423
9424                         if (resp->eee_config_phy_addr &
9425                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9426                                 __le32 tmr;
9427
9428                                 eee->tx_lpi_enabled = 1;
9429                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9430                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9431                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9432                         }
9433                 }
9434         }
9435
9436         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9437         if (bp->hwrm_spec_code >= 0x10504) {
9438                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9439                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9440         }
9441         /* TODO: need to add more logic to report VF link */
9442         if (chng_link_state) {
9443                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9444                         link_info->link_up = 1;
9445                 else
9446                         link_info->link_up = 0;
9447                 if (link_up != link_info->link_up)
9448                         bnxt_report_link(bp);
9449         } else {
9450                 /* always report link down if not asked to update link state */
9451                 link_info->link_up = 0;
9452         }
9453         hwrm_req_drop(bp, req);
9454
9455         if (!BNXT_PHY_CFG_ABLE(bp))
9456                 return 0;
9457
9458         /* Check if any advertised speeds are no longer supported. The caller
9459          * holds the link_lock mutex, so we can modify link_info settings.
9460          */
9461         if (bnxt_support_dropped(link_info->advertising,
9462                                  link_info->support_auto_speeds)) {
9463                 link_info->advertising = link_info->support_auto_speeds;
9464                 support_changed = true;
9465         }
9466         if (bnxt_support_dropped(link_info->advertising_pam4,
9467                                  link_info->support_pam4_auto_speeds)) {
9468                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9469                 support_changed = true;
9470         }
9471         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9472                 bnxt_hwrm_set_link_setting(bp, true, false);
9473         return 0;
9474 }
9475
9476 static void bnxt_get_port_module_status(struct bnxt *bp)
9477 {
9478         struct bnxt_link_info *link_info = &bp->link_info;
9479         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9480         u8 module_status;
9481
9482         if (bnxt_update_link(bp, true))
9483                 return;
9484
9485         module_status = link_info->module_status;
9486         switch (module_status) {
9487         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9488         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9489         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9490                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9491                             bp->pf.port_id);
9492                 if (bp->hwrm_spec_code >= 0x10201) {
9493                         netdev_warn(bp->dev, "Module part number %s\n",
9494                                     resp->phy_vendor_partnumber);
9495                 }
9496                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9497                         netdev_warn(bp->dev, "TX is disabled\n");
9498                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9499                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9500         }
9501 }
9502
9503 static void
9504 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9505 {
9506         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9507                 if (bp->hwrm_spec_code >= 0x10201)
9508                         req->auto_pause =
9509                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9510                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9511                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9512                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9513                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9514                 req->enables |=
9515                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9516         } else {
9517                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9518                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9519                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9520                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9521                 req->enables |=
9522                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9523                 if (bp->hwrm_spec_code >= 0x10201) {
9524                         req->auto_pause = req->force_pause;
9525                         req->enables |= cpu_to_le32(
9526                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9527                 }
9528         }
9529 }
9530
9531 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9532 {
9533         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9534                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9535                 if (bp->link_info.advertising) {
9536                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9537                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9538                 }
9539                 if (bp->link_info.advertising_pam4) {
9540                         req->enables |=
9541                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9542                         req->auto_link_pam4_speed_mask =
9543                                 cpu_to_le16(bp->link_info.advertising_pam4);
9544                 }
9545                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9546                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9547         } else {
9548                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9549                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9550                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9551                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9552                 } else {
9553                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9554                 }
9555         }
9556
9557         /* tell chimp that the setting takes effect immediately */
9558         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9559 }
9560
9561 int bnxt_hwrm_set_pause(struct bnxt *bp)
9562 {
9563         struct hwrm_port_phy_cfg_input *req;
9564         int rc;
9565
9566         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9567         if (rc)
9568                 return rc;
9569
9570         bnxt_hwrm_set_pause_common(bp, req);
9571
9572         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9573             bp->link_info.force_link_chng)
9574                 bnxt_hwrm_set_link_common(bp, req);
9575
9576         rc = hwrm_req_send(bp, req);
9577         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9578                 /* Since changing the pause setting doesn't trigger any link
9579                  * change event, the driver needs to update the current pause
9580                  * result upon successful return of the phy_cfg command.
9581                  */
9582                 bp->link_info.pause =
9583                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9584                 bp->link_info.auto_pause_setting = 0;
9585                 if (!bp->link_info.force_link_chng)
9586                         bnxt_report_link(bp);
9587         }
9588         bp->link_info.force_link_chng = false;
9589         return rc;
9590 }
9591
9592 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9593                               struct hwrm_port_phy_cfg_input *req)
9594 {
9595         struct ethtool_eee *eee = &bp->eee;
9596
9597         if (eee->eee_enabled) {
9598                 u16 eee_speeds;
9599                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9600
9601                 if (eee->tx_lpi_enabled)
9602                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9603                 else
9604                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9605
9606                 req->flags |= cpu_to_le32(flags);
9607                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9608                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9609                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9610         } else {
9611                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9612         }
9613 }
9614
9615 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9616 {
9617         struct hwrm_port_phy_cfg_input *req;
9618         int rc;
9619
9620         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9621         if (rc)
9622                 return rc;
9623
9624         if (set_pause)
9625                 bnxt_hwrm_set_pause_common(bp, req);
9626
9627         bnxt_hwrm_set_link_common(bp, req);
9628
9629         if (set_eee)
9630                 bnxt_hwrm_set_eee(bp, req);
9631         return hwrm_req_send(bp, req);
9632 }
9633
9634 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9635 {
9636         struct hwrm_port_phy_cfg_input *req;
9637         int rc;
9638
9639         if (!BNXT_SINGLE_PF(bp))
9640                 return 0;
9641
9642         if (pci_num_vf(bp->pdev) &&
9643             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9644                 return 0;
9645
9646         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9647         if (rc)
9648                 return rc;
9649
9650         req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9651         return hwrm_req_send(bp, req);
9652 }
9653
9654 static int bnxt_fw_init_one(struct bnxt *bp);
9655
9656 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9657 {
9658 #ifdef CONFIG_TEE_BNXT_FW
9659         int rc = tee_bnxt_fw_load();
9660
9661         if (rc)
9662                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9663
9664         return rc;
9665 #else
9666         netdev_err(bp->dev, "OP-TEE not supported\n");
9667         return -ENODEV;
9668 #endif
9669 }
9670
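/* Poll the firmware health status register until the firmware is no
 * longer booting or recovering, then report whether it is healthy.
 * If the firmware crashed with no master function to recover it,
 * request a firmware reset through OP-TEE instead.
 */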
9671 static int bnxt_try_recover_fw(struct bnxt *bp)
9672 {
9673         if (bp->fw_health && bp->fw_health->status_reliable) {
9674                 int retry = 0, rc;
9675                 u32 sts;
9676
9677                 do {
9678                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9679                         rc = bnxt_hwrm_poll(bp);
9680                         if (!BNXT_FW_IS_BOOTING(sts) &&
9681                             !BNXT_FW_IS_RECOVERING(sts))
9682                                 break;
9683                         retry++;
9684                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9685
9686                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9687                         netdev_err(bp->dev,
9688                                    "Firmware not responding, status: 0x%x\n",
9689                                    sts);
9690                         rc = -ENODEV;
9691                 }
9692                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9693                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9694                         return bnxt_fw_reset_via_optee(bp);
9695                 }
9696                 return rc;
9697         }
9698
9699         return -ENODEV;
9700 }
9701
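/* Notify the firmware with FUNC_DRV_IF_CHANGE that the interface is
 * going up or down.  On the up transition, the response flags tell us
 * whether resources have changed or a hot firmware reset has
 * completed, in which case the driver state must be rebuilt before
 * the rings are reserved.
 */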
9702 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9703 {
9704         struct hwrm_func_drv_if_change_output *resp;
9705         struct hwrm_func_drv_if_change_input *req;
9706         bool fw_reset = !bp->irq_tbl;
9707         bool resc_reinit = false;
9708         int rc, retry = 0;
9709         u32 flags = 0;
9710
9711         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9712                 return 0;
9713
9714         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9715         if (rc)
9716                 return rc;
9717
9718         if (up)
9719                 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9720         resp = hwrm_req_hold(bp, req);
9721
9722         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9723         while (retry < BNXT_FW_IF_RETRY) {
9724                 rc = hwrm_req_send(bp, req);
9725                 if (rc != -EAGAIN)
9726                         break;
9727
9728                 msleep(50);
9729                 retry++;
9730         }
9731
9732         if (rc == -EAGAIN) {
9733                 hwrm_req_drop(bp, req);
9734                 return rc;
9735         } else if (!rc) {
9736                 flags = le32_to_cpu(resp->flags);
9737         } else if (up) {
9738                 rc = bnxt_try_recover_fw(bp);
9739                 fw_reset = true;
9740         }
9741         hwrm_req_drop(bp, req);
9742         if (rc)
9743                 return rc;
9744
9745         if (!up) {
9746                 bnxt_inv_fw_health_reg(bp);
9747                 return 0;
9748         }
9749
9750         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9751                 resc_reinit = true;
9752         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9753                 fw_reset = true;
9754         else if (bp->fw_health && !bp->fw_health->status_reliable)
9755                 bnxt_try_map_fw_health_reg(bp);
9756
9757         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9758                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9759                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9760                 return -ENODEV;
9761         }
9762         if (resc_reinit || fw_reset) {
9763                 if (fw_reset) {
9764                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9765                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9766                                 bnxt_ulp_stop(bp);
9767                         bnxt_free_ctx_mem(bp);
9768                         kfree(bp->ctx);
9769                         bp->ctx = NULL;
9770                         bnxt_dcb_free(bp);
9771                         rc = bnxt_fw_init_one(bp);
9772                         if (rc) {
9773                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9774                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9775                                 return rc;
9776                         }
9777                         bnxt_clear_int_mode(bp);
9778                         rc = bnxt_init_int_mode(bp);
9779                         if (rc) {
9780                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9781                                 netdev_err(bp->dev, "init int mode failed\n");
9782                                 return rc;
9783                         }
9784                 }
9785                 if (BNXT_NEW_RM(bp)) {
9786                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9787
9788                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9789                         if (rc)
9790                                 netdev_err(bp->dev, "resc_qcaps failed\n");
9791
9792                         hw_resc->resv_cp_rings = 0;
9793                         hw_resc->resv_stat_ctxs = 0;
9794                         hw_resc->resv_irqs = 0;
9795                         hw_resc->resv_tx_rings = 0;
9796                         hw_resc->resv_rx_rings = 0;
9797                         hw_resc->resv_hw_ring_grps = 0;
9798                         hw_resc->resv_vnics = 0;
9799                         if (!fw_reset) {
9800                                 bp->tx_nr_rings = 0;
9801                                 bp->rx_nr_rings = 0;
9802                         }
9803                 }
9804         }
9805         return rc;
9806 }
9807
9808 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9809 {
9810         struct hwrm_port_led_qcaps_output *resp;
9811         struct hwrm_port_led_qcaps_input *req;
9812         struct bnxt_pf_info *pf = &bp->pf;
9813         int rc;
9814
9815         bp->num_leds = 0;
9816         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9817                 return 0;
9818
9819         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9820         if (rc)
9821                 return rc;
9822
9823         req->port_id = cpu_to_le16(pf->port_id);
9824         resp = hwrm_req_hold(bp, req);
9825         rc = hwrm_req_send(bp, req);
9826         if (rc) {
9827                 hwrm_req_drop(bp, req);
9828                 return rc;
9829         }
9830         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9831                 int i;
9832
9833                 bp->num_leds = resp->num_leds;
9834                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9835                                                  bp->num_leds);
9836                 for (i = 0; i < bp->num_leds; i++) {
9837                         struct bnxt_led_info *led = &bp->leds[i];
9838                         __le16 caps = led->led_state_caps;
9839
9840                         if (!led->led_group_id ||
9841                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9842                                 bp->num_leds = 0;
9843                                 break;
9844                         }
9845                 }
9846         }
9847         hwrm_req_drop(bp, req);
9848         return 0;
9849 }
9850
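/* Allocate a magic-packet Wake-on-LAN filter for the port using the
 * netdev's current MAC address and save the returned filter ID.
 */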
9851 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9852 {
9853         struct hwrm_wol_filter_alloc_output *resp;
9854         struct hwrm_wol_filter_alloc_input *req;
9855         int rc;
9856
9857         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9858         if (rc)
9859                 return rc;
9860
9861         req->port_id = cpu_to_le16(bp->pf.port_id);
9862         req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9863         req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9864         memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9865
9866         resp = hwrm_req_hold(bp, req);
9867         rc = hwrm_req_send(bp, req);
9868         if (!rc)
9869                 bp->wol_filter_id = resp->wol_filter_id;
9870         hwrm_req_drop(bp, req);
9871         return rc;
9872 }
9873
9874 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9875 {
9876         struct hwrm_wol_filter_free_input *req;
9877         int rc;
9878
9879         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9880         if (rc)
9881                 return rc;
9882
9883         req->port_id = cpu_to_le16(bp->pf.port_id);
9884         req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9885         req->wol_filter_id = bp->wol_filter_id;
9886
9887         return hwrm_req_send(bp, req);
9888 }
9889
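/* Query one WoL filter and return the handle of the next one, noting
 * the filter ID if a magic-packet filter is found.  Used by
 * bnxt_get_wol_settings() below to walk the firmware's filter list.
 */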
9890 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9891 {
9892         struct hwrm_wol_filter_qcfg_output *resp;
9893         struct hwrm_wol_filter_qcfg_input *req;
9894         u16 next_handle = 0;
9895         int rc;
9896
9897         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9898         if (rc)
9899                 return rc;
9900
9901         req->port_id = cpu_to_le16(bp->pf.port_id);
9902         req->handle = cpu_to_le16(handle);
9903         resp = hwrm_req_hold(bp, req);
9904         rc = hwrm_req_send(bp, req);
9905         if (!rc) {
9906                 next_handle = le16_to_cpu(resp->next_handle);
9907                 if (next_handle != 0) {
9908                         if (resp->wol_type ==
9909                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9910                                 bp->wol = 1;
9911                                 bp->wol_filter_id = resp->wol_filter_id;
9912                         }
9913                 }
9914         }
9915         hwrm_req_drop(bp, req);
9916         return next_handle;
9917 }
9918
9919 static void bnxt_get_wol_settings(struct bnxt *bp)
9920 {
9921         u16 handle = 0;
9922
9923         bp->wol = 0;
9924         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9925                 return;
9926
9927         do {
9928                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9929         } while (handle && handle != 0xffff);
9930 }
9931
9932 #ifdef CONFIG_BNXT_HWMON
9933 static ssize_t bnxt_show_temp(struct device *dev,
9934                               struct device_attribute *devattr, char *buf)
9935 {
9936         struct hwrm_temp_monitor_query_output *resp;
9937         struct hwrm_temp_monitor_query_input *req;
9938         struct bnxt *bp = dev_get_drvdata(dev);
9939         u32 len = 0;
9940         int rc;
9941
9942         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9943         if (rc)
9944                 return rc;
9945         resp = hwrm_req_hold(bp, req);
9946         rc = hwrm_req_send(bp, req);
9947         if (!rc)
9948                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegrees Celsius */
9949         hwrm_req_drop(bp, req);
9950         if (rc)
9951                 return rc;
9952         return len;
9953 }
9954 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9955
9956 static struct attribute *bnxt_attrs[] = {
9957         &sensor_dev_attr_temp1_input.dev_attr.attr,
9958         NULL
9959 };
9960 ATTRIBUTE_GROUPS(bnxt);
9961
9962 static void bnxt_hwmon_close(struct bnxt *bp)
9963 {
9964         if (bp->hwmon_dev) {
9965                 hwmon_device_unregister(bp->hwmon_dev);
9966                 bp->hwmon_dev = NULL;
9967         }
9968 }
9969
9970 static void bnxt_hwmon_open(struct bnxt *bp)
9971 {
9972         struct hwrm_temp_monitor_query_input *req;
9973         struct pci_dev *pdev = bp->pdev;
9974         int rc;
9975
9976         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9977         if (!rc)
9978                 rc = hwrm_req_send_silent(bp, req);
9979         if (rc == -EACCES || rc == -EOPNOTSUPP) {
9980                 bnxt_hwmon_close(bp);
9981                 return;
9982         }
9983
9984         if (bp->hwmon_dev)
9985                 return;
9986
9987         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9988                                                           DRV_MODULE_NAME, bp,
9989                                                           bnxt_groups);
9990         if (IS_ERR(bp->hwmon_dev)) {
9991                 bp->hwmon_dev = NULL;
9992                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9993         }
9994 }
9995 #else
9996 static void bnxt_hwmon_close(struct bnxt *bp)
9997 {
9998 }
9999
10000 static void bnxt_hwmon_open(struct bnxt *bp)
10001 {
10002 }
10003 #endif
10004
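/* Validate the EEE configuration against the current autoneg state.
 * EEE requires autoneg, and the EEE advertisement must be a subset of
 * the advertised link speeds.  Returns false if anything had to be
 * fixed up.
 */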
10005 static bool bnxt_eee_config_ok(struct bnxt *bp)
10006 {
10007         struct ethtool_eee *eee = &bp->eee;
10008         struct bnxt_link_info *link_info = &bp->link_info;
10009
10010         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10011                 return true;
10012
10013         if (eee->eee_enabled) {
10014                 u32 advertising =
10015                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10016
10017                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10018                         eee->eee_enabled = 0;
10019                         return false;
10020                 }
10021                 if (eee->advertised & ~advertising) {
10022                         eee->advertised = advertising & eee->supported;
10023                         return false;
10024                 }
10025         }
10026         return true;
10027 }
10028
10029 static int bnxt_update_phy_setting(struct bnxt *bp)
10030 {
10031         int rc;
10032         bool update_link = false;
10033         bool update_pause = false;
10034         bool update_eee = false;
10035         struct bnxt_link_info *link_info = &bp->link_info;
10036
10037         rc = bnxt_update_link(bp, true);
10038         if (rc) {
10039                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10040                            rc);
10041                 return rc;
10042         }
10043         if (!BNXT_SINGLE_PF(bp))
10044                 return 0;
10045
10046         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10047             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10048             link_info->req_flow_ctrl)
10049                 update_pause = true;
10050         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10051             link_info->force_pause_setting != link_info->req_flow_ctrl)
10052                 update_pause = true;
10053         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10054                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10055                         update_link = true;
10056                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10057                     link_info->req_link_speed != link_info->force_link_speed)
10058                         update_link = true;
10059                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10060                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10061                         update_link = true;
10062                 if (link_info->req_duplex != link_info->duplex_setting)
10063                         update_link = true;
10064         } else {
10065                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10066                         update_link = true;
10067                 if (link_info->advertising != link_info->auto_link_speeds ||
10068                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10069                         update_link = true;
10070         }
10071
10072         /* The last close may have shut down the link, so we need to call
10073          * PHY_CFG to bring it back up.
10074          */
10075         if (!bp->link_info.link_up)
10076                 update_link = true;
10077
10078         if (!bnxt_eee_config_ok(bp))
10079                 update_eee = true;
10080
10081         if (update_link)
10082                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10083         else if (update_pause)
10084                 rc = bnxt_hwrm_set_pause(bp);
10085         if (rc) {
10086                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10087                            rc);
10088                 return rc;
10089         }
10090
10091         return rc;
10092 }
10093
10094 /* Common routine to pre-map certain register blocks to different GRC windows.
10095  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10096  * in the PF and 3 windows in the VF can be customized to map in different
10097  * register blocks.
10098  */
10099 static void bnxt_preset_reg_win(struct bnxt *bp)
10100 {
10101         if (BNXT_PF(bp)) {
10102                 /* CAG registers map to GRC window #4 */
10103                 writel(BNXT_CAG_REG_BASE,
10104                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10105         }
10106 }
10107
10108 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10109
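/* Attempt a full reinitialization after an aborted firmware reset by
 * re-running firmware init and re-establishing the interrupt mode
 * before the device is reopened.
 */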
10110 static int bnxt_reinit_after_abort(struct bnxt *bp)
10111 {
10112         int rc;
10113
10114         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10115                 return -EBUSY;
10116
10117         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10118                 return -ENODEV;
10119
10120         rc = bnxt_fw_init_one(bp);
10121         if (!rc) {
10122                 bnxt_clear_int_mode(bp);
10123                 rc = bnxt_init_int_mode(bp);
10124                 if (!rc) {
10125                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10126                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10127                 }
10128         }
10129         return rc;
10130 }
10131
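/* Core open path: reserve rings, allocate memory, set up NAPI and
 * IRQs, initialize the NIC, bring up the PHY, and finally enable
 * interrupts, TX queues, and the periodic timer.  irq_re_init and
 * link_re_init select how much of that state is rebuilt.
 */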
10132 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10133 {
10134         int rc = 0;
10135
10136         bnxt_preset_reg_win(bp);
10137         netif_carrier_off(bp->dev);
10138         if (irq_re_init) {
10139                 /* Reserve rings now if none were reserved at driver probe. */
10140                 rc = bnxt_init_dflt_ring_mode(bp);
10141                 if (rc) {
10142                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10143                         return rc;
10144                 }
10145         }
10146         rc = bnxt_reserve_rings(bp, irq_re_init);
10147         if (rc)
10148                 return rc;
10149         if ((bp->flags & BNXT_FLAG_RFS) &&
10150             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10151                 /* disable RFS if falling back to INTA */
10152                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10153                 bp->flags &= ~BNXT_FLAG_RFS;
10154         }
10155
10156         rc = bnxt_alloc_mem(bp, irq_re_init);
10157         if (rc) {
10158                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10159                 goto open_err_free_mem;
10160         }
10161
10162         if (irq_re_init) {
10163                 bnxt_init_napi(bp);
10164                 rc = bnxt_request_irq(bp);
10165                 if (rc) {
10166                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10167                         goto open_err_irq;
10168                 }
10169         }
10170
10171         rc = bnxt_init_nic(bp, irq_re_init);
10172         if (rc) {
10173                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10174                 goto open_err_irq;
10175         }
10176
10177         bnxt_enable_napi(bp);
10178         bnxt_debug_dev_init(bp);
10179
10180         if (link_re_init) {
10181                 mutex_lock(&bp->link_lock);
10182                 rc = bnxt_update_phy_setting(bp);
10183                 mutex_unlock(&bp->link_lock);
10184                 if (rc) {
10185                         netdev_warn(bp->dev, "failed to update phy settings\n");
10186                         if (BNXT_SINGLE_PF(bp)) {
10187                                 bp->link_info.phy_retry = true;
10188                                 bp->link_info.phy_retry_expires =
10189                                         jiffies + 5 * HZ;
10190                         }
10191                 }
10192         }
10193
10194         if (irq_re_init)
10195                 udp_tunnel_nic_reset_ntf(bp->dev);
10196
10197         set_bit(BNXT_STATE_OPEN, &bp->state);
10198         bnxt_enable_int(bp);
10199         /* Enable TX queues */
10200         bnxt_tx_enable(bp);
10201         mod_timer(&bp->timer, jiffies + bp->current_interval);
10202         /* Poll link status and check SFP+ module status */
10203         mutex_lock(&bp->link_lock);
10204         bnxt_get_port_module_status(bp);
10205         mutex_unlock(&bp->link_lock);
10206
10207         /* VF-reps may need to be re-opened after the PF is re-opened */
10208         if (BNXT_PF(bp))
10209                 bnxt_vf_reps_open(bp);
10210         return 0;
10211
10212 open_err_irq:
10213         bnxt_del_napi(bp);
10214
10215 open_err_free_mem:
10216         bnxt_free_skbs(bp);
10217         bnxt_free_irq(bp);
10218         bnxt_free_mem(bp, true);
10219         return rc;
10220 }
10221
10222 /* rtnl_lock held */
10223 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10224 {
10225         int rc = 0;
10226
10227         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10228                 rc = -EIO;
10229         if (!rc)
10230                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10231         if (rc) {
10232                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10233                 dev_close(bp->dev);
10234         }
10235         return rc;
10236 }
10237
10238 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10239  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10240  * self-tests.
10241  */
10242 int bnxt_half_open_nic(struct bnxt *bp)
10243 {
10244         int rc = 0;
10245
10246         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10247                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10248                 rc = -ENODEV;
10249                 goto half_open_err;
10250         }
10251
10252         rc = bnxt_alloc_mem(bp, false);
10253         if (rc) {
10254                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10255                 goto half_open_err;
10256         }
10257         rc = bnxt_init_nic(bp, false);
10258         if (rc) {
10259                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10260                 goto half_open_err;
10261         }
10262         return 0;
10263
10264 half_open_err:
10265         bnxt_free_skbs(bp);
10266         bnxt_free_mem(bp, false);
10267         dev_close(bp->dev);
10268         return rc;
10269 }
10270
10271 /* rtnl_lock held, this call can only be made after a previous successful
10272  * call to bnxt_half_open_nic().
10273  */
10274 void bnxt_half_close_nic(struct bnxt *bp)
10275 {
10276         bnxt_hwrm_resource_free(bp, false, false);
10277         bnxt_free_skbs(bp);
10278         bnxt_free_mem(bp, false);
10279 }
10280
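/* Restore the SR-IOV hardware configuration for VFs that were active
 * before a firmware reset.
 */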
10281 static void bnxt_reenable_sriov(struct bnxt *bp)
10282 {
10283         if (BNXT_PF(bp)) {
10284                 struct bnxt_pf_info *pf = &bp->pf;
10285                 int n = pf->active_vfs;
10286
10287                 if (n)
10288                         bnxt_cfg_hw_sriov(bp, &n, true);
10289         }
10290 }
10291
10292 static int bnxt_open(struct net_device *dev)
10293 {
10294         struct bnxt *bp = netdev_priv(dev);
10295         int rc;
10296
10297         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10298                 rc = bnxt_reinit_after_abort(bp);
10299                 if (rc) {
10300                         if (rc == -EBUSY)
10301                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10302                         else
10303                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10304                         return -ENODEV;
10305                 }
10306         }
10307
10308         rc = bnxt_hwrm_if_change(bp, true);
10309         if (rc)
10310                 return rc;
10311
10312         rc = __bnxt_open_nic(bp, true, true);
10313         if (rc) {
10314                 bnxt_hwrm_if_change(bp, false);
10315         } else {
10316                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10317                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10318                                 bnxt_ulp_start(bp, 0);
10319                                 bnxt_reenable_sriov(bp);
10320                         }
10321                 }
10322                 bnxt_hwmon_open(bp);
10323         }
10324
10325         return rc;
10326 }
10327
10328 static bool bnxt_drv_busy(struct bnxt *bp)
10329 {
10330         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10331                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10332 }
10333
10334 static void bnxt_get_ring_stats(struct bnxt *bp,
10335                                 struct rtnl_link_stats64 *stats);
10336
10337 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10338                              bool link_re_init)
10339 {
10340         /* Close the VF-reps before closing PF */
10341         if (BNXT_PF(bp))
10342                 bnxt_vf_reps_close(bp);
10343
10344         /* Change device state to avoid TX queue wake-ups */
10345         bnxt_tx_disable(bp);
10346
10347         clear_bit(BNXT_STATE_OPEN, &bp->state);
10348         smp_mb__after_atomic();
10349         while (bnxt_drv_busy(bp))
10350                 msleep(20);
10351
10352         /* Flush rings and disable interrupts */
10353         bnxt_shutdown_nic(bp, irq_re_init);
10354
10355         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10356
10357         bnxt_debug_dev_exit(bp);
10358         bnxt_disable_napi(bp);
10359         del_timer_sync(&bp->timer);
10360         bnxt_free_skbs(bp);
10361
10362         /* Save ring stats before shutdown */
10363         if (bp->bnapi && irq_re_init)
10364                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10365         if (irq_re_init) {
10366                 bnxt_free_irq(bp);
10367                 bnxt_del_napi(bp);
10368         }
10369         bnxt_free_mem(bp, irq_re_init);
10370 }
10371
10372 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10373 {
10374         int rc = 0;
10375
10376         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10377                 /* If we get here, it means firmware reset is in progress
10378                  * while we are trying to close.  We can safely proceed with
10379                  * the close because we are holding rtnl_lock().  Some firmware
10380                  * messages may fail as we proceed to close.  We set the
10381                  * ABORT_ERR flag here so that the FW reset thread will later
10382                  * abort when it gets the rtnl_lock() and sees the flag.
10383                  */
10384                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10385                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10386         }
10387
10388 #ifdef CONFIG_BNXT_SRIOV
10389         if (bp->sriov_cfg) {
10390                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10391                                                       !bp->sriov_cfg,
10392                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10393                 /* wait_event_interruptible_timeout() returns 0 on timeout */
10394                 if (!rc)
                              netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10395         }
10396 #endif
10397         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10398         return rc;
10399 }
10400
10401 static int bnxt_close(struct net_device *dev)
10402 {
10403         struct bnxt *bp = netdev_priv(dev);
10404
10405         bnxt_hwmon_close(bp);
10406         bnxt_close_nic(bp, true, true);
10407         bnxt_hwrm_shutdown_link(bp);
10408         bnxt_hwrm_if_change(bp, false);
10409         return 0;
10410 }
10411
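/* Read a PHY register through the firmware's MDIO interface, using
 * clause 45 addressing when the encoded phy_addr indicates a C45 PHY.
 * bnxt_hwrm_port_phy_write() below is the corresponding write.
 */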
10412 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10413                                    u16 *val)
10414 {
10415         struct hwrm_port_phy_mdio_read_output *resp;
10416         struct hwrm_port_phy_mdio_read_input *req;
10417         int rc;
10418
10419         if (bp->hwrm_spec_code < 0x10a00)
10420                 return -EOPNOTSUPP;
10421
10422         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10423         if (rc)
10424                 return rc;
10425
10426         req->port_id = cpu_to_le16(bp->pf.port_id);
10427         req->phy_addr = phy_addr;
10428         req->reg_addr = cpu_to_le16(reg & 0x1f);
10429         if (mdio_phy_id_is_c45(phy_addr)) {
10430                 req->cl45_mdio = 1;
10431                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10432                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10433                 req->reg_addr = cpu_to_le16(reg);
10434         }
10435
10436         resp = hwrm_req_hold(bp, req);
10437         rc = hwrm_req_send(bp, req);
10438         if (!rc)
10439                 *val = le16_to_cpu(resp->reg_data);
10440         hwrm_req_drop(bp, req);
10441         return rc;
10442 }
10443
10444 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10445                                     u16 val)
10446 {
10447         struct hwrm_port_phy_mdio_write_input *req;
10448         int rc;
10449
10450         if (bp->hwrm_spec_code < 0x10a00)
10451                 return -EOPNOTSUPP;
10452
10453         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10454         if (rc)
10455                 return rc;
10456
10457         req->port_id = cpu_to_le16(bp->pf.port_id);
10458         req->phy_addr = phy_addr;
10459         req->reg_addr = cpu_to_le16(reg & 0x1f);
10460         if (mdio_phy_id_is_c45(phy_addr)) {
10461                 req->cl45_mdio = 1;
10462                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10463                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10464                 req->reg_addr = cpu_to_le16(reg);
10465         }
10466         req->reg_data = cpu_to_le16(val);
10467
10468         return hwrm_req_send(bp, req);
10469 }
10470
10471 /* rtnl_lock held */
10472 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10473 {
10474         struct mii_ioctl_data *mdio = if_mii(ifr);
10475         struct bnxt *bp = netdev_priv(dev);
10476         int rc;
10477
10478         switch (cmd) {
10479         case SIOCGMIIPHY:
10480                 mdio->phy_id = bp->link_info.phy_addr;
10481
10482                 fallthrough;
10483         case SIOCGMIIREG: {
10484                 u16 mii_regval = 0;
10485
10486                 if (!netif_running(dev))
10487                         return -EAGAIN;
10488
10489                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10490                                              &mii_regval);
10491                 mdio->val_out = mii_regval;
10492                 return rc;
10493         }
10494
10495         case SIOCSMIIREG:
10496                 if (!netif_running(dev))
10497                         return -EAGAIN;
10498
10499                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10500                                                 mdio->val_in);
10501
10502         case SIOCSHWTSTAMP:
10503                 return bnxt_hwtstamp_set(dev, ifr);
10504
10505         case SIOCGHWTSTAMP:
10506                 return bnxt_hwtstamp_get(dev, ifr);
10507
10508         default:
10509                 /* do nothing */
10510                 break;
10511         }
10512         return -EOPNOTSUPP;
10513 }
10514
10515 static void bnxt_get_ring_stats(struct bnxt *bp,
10516                                 struct rtnl_link_stats64 *stats)
10517 {
10518         int i;
10519
10520         for (i = 0; i < bp->cp_nr_rings; i++) {
10521                 struct bnxt_napi *bnapi = bp->bnapi[i];
10522                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10523                 u64 *sw = cpr->stats.sw_stats;
10524
10525                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10526                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10527                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10528
10529                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10530                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10531                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10532
10533                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10534                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10535                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10536
10537                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10538                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10539                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10540
10541                 stats->rx_missed_errors +=
10542                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10543
10544                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10545
10546                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10547
10548                 stats->rx_dropped +=
10549                         cpr->sw_stats.rx.rx_netpoll_discards +
10550                         cpr->sw_stats.rx.rx_oom_discards;
10551         }
10552 }
10553
10554 static void bnxt_add_prev_stats(struct bnxt *bp,
10555                                 struct rtnl_link_stats64 *stats)
10556 {
10557         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10558
10559         stats->rx_packets += prev_stats->rx_packets;
10560         stats->tx_packets += prev_stats->tx_packets;
10561         stats->rx_bytes += prev_stats->rx_bytes;
10562         stats->tx_bytes += prev_stats->tx_bytes;
10563         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10564         stats->multicast += prev_stats->multicast;
10565         stats->rx_dropped += prev_stats->rx_dropped;
10566         stats->tx_dropped += prev_stats->tx_dropped;
10567 }
10568
10569 static void
10570 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10571 {
10572         struct bnxt *bp = netdev_priv(dev);
10573
10574         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10575         /* Make sure bnxt_close_nic() sees that we are reading stats before
10576          * we check the BNXT_STATE_OPEN flag.
10577          */
10578         smp_mb__after_atomic();
10579         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10580                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10581                 *stats = bp->net_stats_prev;
10582                 return;
10583         }
10584
10585         bnxt_get_ring_stats(bp, stats);
10586         bnxt_add_prev_stats(bp, stats);
10587
10588         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10589                 u64 *rx = bp->port_stats.sw_stats;
10590                 u64 *tx = bp->port_stats.sw_stats +
10591                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10592
10593                 stats->rx_crc_errors =
10594                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10595                 stats->rx_frame_errors =
10596                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10597                 stats->rx_length_errors =
10598                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10599                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10600                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10601                 stats->rx_errors =
10602                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10603                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10604                 stats->collisions =
10605                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10606                 stats->tx_fifo_errors =
10607                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10608                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10609         }
10610         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10611 }
10612
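/* Sync the netdev multicast list into VNIC 0's mc_list, falling back
 * to ALL_MCAST mode when the list exceeds BNXT_MAX_MC_ADDRS entries.
 * Returns true if the firmware needs to be updated.
 */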
10613 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10614 {
10615         struct net_device *dev = bp->dev;
10616         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10617         struct netdev_hw_addr *ha;
10618         u8 *haddr;
10619         int mc_count = 0;
10620         bool update = false;
10621         int off = 0;
10622
10623         netdev_for_each_mc_addr(ha, dev) {
10624                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10625                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10626                         vnic->mc_list_count = 0;
10627                         return false;
10628                 }
10629                 haddr = ha->addr;
10630                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10631                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10632                         update = true;
10633                 }
10634                 off += ETH_ALEN;
10635                 mc_count++;
10636         }
10637         if (mc_count)
10638                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10639
10640         if (mc_count != vnic->mc_list_count) {
10641                 vnic->mc_list_count = mc_count;
10642                 update = true;
10643         }
10644         return update;
10645 }
10646
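/* Return true if the netdev unicast address list differs from the
 * filters currently programmed into VNIC 0 (entry 0 is the primary
 * MAC address and is excluded from the comparison).
 */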
10647 static bool bnxt_uc_list_updated(struct bnxt *bp)
10648 {
10649         struct net_device *dev = bp->dev;
10650         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10651         struct netdev_hw_addr *ha;
10652         int off = 0;
10653
10654         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10655                 return true;
10656
10657         netdev_for_each_uc_addr(ha, dev) {
10658                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10659                         return true;
10660
10661                 off += ETH_ALEN;
10662         }
10663         return false;
10664 }
10665
10666 static void bnxt_set_rx_mode(struct net_device *dev)
10667 {
10668         struct bnxt *bp = netdev_priv(dev);
10669         struct bnxt_vnic_info *vnic;
10670         bool mc_update = false;
10671         bool uc_update;
10672         u32 mask;
10673
10674         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10675                 return;
10676
10677         vnic = &bp->vnic_info[0];
10678         mask = vnic->rx_mask;
10679         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10680                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10681                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10682                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10683
10684         if (dev->flags & IFF_PROMISC)
10685                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10686
10687         uc_update = bnxt_uc_list_updated(bp);
10688
10689         if (dev->flags & IFF_BROADCAST)
10690                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10691         if (dev->flags & IFF_ALLMULTI) {
10692                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10693                 vnic->mc_list_count = 0;
10694         } else {
10695                 mc_update = bnxt_mc_list_updated(bp, &mask);
10696         }
10697
10698         if (mask != vnic->rx_mask || uc_update || mc_update) {
10699                 vnic->rx_mask = mask;
10700
10701                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10702                 bnxt_queue_sp_work(bp);
10703         }
10704 }
10705
10706 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10707 {
10708         struct net_device *dev = bp->dev;
10709         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10710         struct hwrm_cfa_l2_filter_free_input *req;
10711         struct netdev_hw_addr *ha;
10712         int i, off = 0, rc;
10713         bool uc_update;
10714
10715         netif_addr_lock_bh(dev);
10716         uc_update = bnxt_uc_list_updated(bp);
10717         netif_addr_unlock_bh(dev);
10718
10719         if (!uc_update)
10720                 goto skip_uc;
10721
10722         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10723         if (rc)
10724                 return rc;
10725         hwrm_req_hold(bp, req);
10726         for (i = 1; i < vnic->uc_filter_count; i++) {
10727                 req->l2_filter_id = vnic->fw_l2_filter_id[i];
10728
10729                 rc = hwrm_req_send(bp, req);
10730         }
10731         hwrm_req_drop(bp, req);
10732
10733         vnic->uc_filter_count = 1;
10734
10735         netif_addr_lock_bh(dev);
10736         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10737                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10738         } else {
10739                 netdev_for_each_uc_addr(ha, dev) {
10740                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10741                         off += ETH_ALEN;
10742                         vnic->uc_filter_count++;
10743                 }
10744         }
10745         netif_addr_unlock_bh(dev);
10746
10747         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10748                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10749                 if (rc) {
10750                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10751                                    rc);
10752                         vnic->uc_filter_count = i;
10753                         return rc;
10754                 }
10755         }
10756
10757 skip_uc:
10758         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10759             !bnxt_promisc_ok(bp))
10760                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10761         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10762         if (rc && vnic->mc_list_count) {
10763                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10764                             rc);
10765                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10766                 vnic->mc_list_count = 0;
10767                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10768         }
10769         if (rc)
10770                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10771                            rc);
10772
10773         return rc;
10774 }
10775
10776 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10777 {
10778 #ifdef CONFIG_BNXT_SRIOV
10779         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10780                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10781
10782                 /* If no minimum rings were provisioned by the PF, don't
10783                  * reserve rings by default when the device is down.
10784                  */
10785                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10786                         return true;
10787
10788                 if (!netif_running(bp->dev))
10789                         return false;
10790         }
10791 #endif
10792         return true;
10793 }
10794
10795 /* If the chip and firmware support RFS */
10796 static bool bnxt_rfs_supported(struct bnxt *bp)
10797 {
10798         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10799                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10800                         return true;
10801                 return false;
10802         }
10803         /* 212 firmware is broken for aRFS */
10804         if (BNXT_FW_MAJ(bp) == 212)
10805                 return false;
10806         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10807                 return true;
10808         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10809                 return true;
10810         return false;
10811 }
10812
10813 /* If runtime conditions support RFS */
10814 static bool bnxt_rfs_capable(struct bnxt *bp)
10815 {
10816 #ifdef CONFIG_RFS_ACCEL
10817         int vnics, max_vnics, max_rss_ctxs;
10818
10819         if (bp->flags & BNXT_FLAG_CHIP_P5)
10820                 return bnxt_rfs_supported(bp);
10821         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10822                 return false;
10823
10824         vnics = 1 + bp->rx_nr_rings;
10825         max_vnics = bnxt_get_max_func_vnics(bp);
10826         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10827
10828         /* RSS contexts not a limiting factor */
10829         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10830                 max_rss_ctxs = max_vnics;
10831         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10832                 if (bp->rx_nr_rings > 1)
10833                         netdev_warn(bp->dev,
10834                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10835                                     min(max_rss_ctxs - 1, max_vnics - 1));
10836                 return false;
10837         }
10838
10839         if (!BNXT_NEW_RM(bp))
10840                 return true;
10841
10842         if (vnics == bp->hw_resc.resv_vnics)
10843                 return true;
10844
10845         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10846         if (vnics <= bp->hw_resc.resv_vnics)
10847                 return true;
10848
10849         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10850         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10851         return false;
10852 #else
10853         return false;
10854 #endif
10855 }
10856
10857 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10858                                            netdev_features_t features)
10859 {
10860         struct bnxt *bp = netdev_priv(dev);
10861         netdev_features_t vlan_features;
10862
10863         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10864                 features &= ~NETIF_F_NTUPLE;
10865
10866         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10867                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10868
10869         if (!(features & NETIF_F_GRO))
10870                 features &= ~NETIF_F_GRO_HW;
10871
10872         if (features & NETIF_F_GRO_HW)
10873                 features &= ~NETIF_F_LRO;
10874
10875         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10876          * turned on or off together.
10877          */
10878         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10879         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10880                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10881                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10882                 else if (vlan_features)
10883                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10884         }
10885 #ifdef CONFIG_BNXT_SRIOV
10886         if (BNXT_VF(bp) && bp->vf.vlan)
10887                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10888 #endif
10889         return features;
10890 }
10891
10892 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10893 {
10894         struct bnxt *bp = netdev_priv(dev);
10895         u32 flags = bp->flags;
10896         u32 changes;
10897         int rc = 0;
10898         bool re_init = false;
10899         bool update_tpa = false;
10900
10901         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10902         if (features & NETIF_F_GRO_HW)
10903                 flags |= BNXT_FLAG_GRO;
10904         else if (features & NETIF_F_LRO)
10905                 flags |= BNXT_FLAG_LRO;
10906
10907         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10908                 flags &= ~BNXT_FLAG_TPA;
10909
10910         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10911                 flags |= BNXT_FLAG_STRIP_VLAN;
10912
10913         if (features & NETIF_F_NTUPLE)
10914                 flags |= BNXT_FLAG_RFS;
10915
10916         changes = flags ^ bp->flags;
10917         if (changes & BNXT_FLAG_TPA) {
10918                 update_tpa = true;
10919                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10920                     (flags & BNXT_FLAG_TPA) == 0 ||
10921                     (bp->flags & BNXT_FLAG_CHIP_P5))
10922                         re_init = true;
10923         }
10924
10925         if (changes & ~BNXT_FLAG_TPA)
10926                 re_init = true;
10927
10928         if (flags != bp->flags) {
10929                 u32 old_flags = bp->flags;
10930
10931                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10932                         bp->flags = flags;
10933                         if (update_tpa)
10934                                 bnxt_set_ring_params(bp);
10935                         return rc;
10936                 }
10937
10938                 if (re_init) {
10939                         bnxt_close_nic(bp, false, false);
10940                         bp->flags = flags;
10941                         if (update_tpa)
10942                                 bnxt_set_ring_params(bp);
10943
10944                         return bnxt_open_nic(bp, false, false);
10945                 }
10946                 if (update_tpa) {
10947                         bp->flags = flags;
10948                         rc = bnxt_set_tpa(bp,
10949                                           (flags & BNXT_FLAG_TPA) ?
10950                                           true : false);
10951                         if (rc)
10952                                 bp->flags = old_flags;
10953                 }
10954         }
10955         return rc;
10956 }
10957
10958 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10959                               u8 **nextp)
10960 {
10961         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10962         int hdr_count = 0;
10963         u8 *nexthdr;
10964         int start;
10965
10966         /* Check that there are at most 2 IPv6 extension headers, no
10967          * fragment header, and each is <= 64 bytes.
10968          */
10969         start = nw_off + sizeof(*ip6h);
10970         nexthdr = &ip6h->nexthdr;
10971         while (ipv6_ext_hdr(*nexthdr)) {
10972                 struct ipv6_opt_hdr *hp;
10973                 int hdrlen;
10974
10975                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10976                     *nexthdr == NEXTHDR_FRAGMENT)
10977                         return false;
10978                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10979                                           skb_headlen(skb), NULL);
10980                 if (!hp)
10981                         return false;
10982                 if (*nexthdr == NEXTHDR_AUTH)
10983                         hdrlen = ipv6_authlen(hp);
10984                 else
10985                         hdrlen = ipv6_optlen(hp);
10986
10987                 if (hdrlen > 64)
10988                         return false;
10989                 nexthdr = &hp->nexthdr;
10990                 start += hdrlen;
10991                 hdr_count++;
10992         }
10993         if (nextp) {
10994                 /* Caller will check inner protocol */
10995                 if (skb->encapsulation) {
10996                         *nextp = nexthdr;
10997                         return true;
10998                 }
10999                 *nextp = NULL;
11000         }
11001         /* Only support TCP/UDP for non-tunneled IPv6 and inner IPv6 */
11002         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11003 }
11004
11005 /* For UDP, we can only handle one VXLAN port and one Geneve port. */
11006 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11007 {
11008         struct udphdr *uh = udp_hdr(skb);
11009         __be16 udp_port = uh->dest;
11010
11011         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11012                 return false;
11013         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11014                 struct ethhdr *eh = inner_eth_hdr(skb);
11015
11016                 switch (eh->h_proto) {
11017                 case htons(ETH_P_IP):
11018                         return true;
11019                 case htons(ETH_P_IPV6):
11020                         return bnxt_exthdr_check(bp, skb,
11021                                                  skb_inner_network_offset(skb),
11022                                                  NULL);
11023                 }
11024         }
11025         return false;
11026 }
11027
11028 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11029 {
11030         switch (l4_proto) {
11031         case IPPROTO_UDP:
11032                 return bnxt_udp_tunl_check(bp, skb);
11033         case IPPROTO_IPIP:
11034                 return true;
11035         case IPPROTO_GRE: {
11036                 switch (skb->inner_protocol) {
11037                 default:
11038                         return false;
11039                 case htons(ETH_P_IP):
11040                         return true;
11041                 case htons(ETH_P_IPV6):
11042                         fallthrough;
11043                 }
11044         }
11045         case IPPROTO_IPV6:
11046                 /* Check extension headers of inner IPv6 */
11047                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11048                                          NULL);
11049         }
11050         return false;
11051 }
11052
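/* ndo_features_check() handler: keep checksum and GSO offloads only
 * for packets whose (possibly tunneled) headers the hardware can
 * parse; clear them for everything else.
 */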
11053 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11054                                              struct net_device *dev,
11055                                              netdev_features_t features)
11056 {
11057         struct bnxt *bp = netdev_priv(dev);
11058         u8 *l4_proto;
11059
11060         features = vlan_features_check(skb, features);
11061         switch (vlan_get_protocol(skb)) {
11062         case htons(ETH_P_IP):
11063                 if (!skb->encapsulation)
11064                         return features;
11065                 l4_proto = &ip_hdr(skb)->protocol;
11066                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11067                         return features;
11068                 break;
11069         case htons(ETH_P_IPV6):
11070                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11071                                        &l4_proto))
11072                         break;
11073                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11074                         return features;
11075                 break;
11076         }
11077         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11078 }
11079
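/* Read num_words 32-bit words of chip registers starting at reg_off
 * using the DBG_READ_DIRECT firmware command, with a DMA slice of the
 * request serving as the destination buffer.
 */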
11080 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11081                          u32 *reg_buf)
11082 {
11083         struct hwrm_dbg_read_direct_output *resp;
11084         struct hwrm_dbg_read_direct_input *req;
11085         __le32 *dbg_reg_buf;
11086         dma_addr_t mapping;
11087         int rc, i;
11088
11089         rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11090         if (rc)
11091                 return rc;
11092
11093         dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11094                                          &mapping);
11095         if (!dbg_reg_buf) {
11096                 rc = -ENOMEM;
11097                 goto dbg_rd_reg_exit;
11098         }
11099
11100         req->host_dest_addr = cpu_to_le64(mapping);
11101
11102         resp = hwrm_req_hold(bp, req);
11103         req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11104         req->read_len32 = cpu_to_le32(num_words);
11105
11106         rc = hwrm_req_send(bp, req);
11107         if (rc || resp->error_code) {
11108                 rc = -EIO;
11109                 goto dbg_rd_reg_exit;
11110         }
11111         for (i = 0; i < num_words; i++)
11112                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11113
11114 dbg_rd_reg_exit:
11115         hwrm_req_drop(bp, req);
11116         return rc;
11117 }
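/* Usage sketch (illustrative only; 0x1000 is a made-up offset, not a
 * real register): read four 32-bit words into a caller buffer.
 *
 *	u32 buf[4];
 *
 *	if (!bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 4, buf))
 *		netdev_info(bp->dev, "reg[0] = 0x%08x\n", buf[0]);
 */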
11118
11119 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11120                                        u32 ring_id, u32 *prod, u32 *cons)
11121 {
11122         struct hwrm_dbg_ring_info_get_output *resp;
11123         struct hwrm_dbg_ring_info_get_input *req;
11124         int rc;
11125
11126         rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11127         if (rc)
11128                 return rc;
11129
11130         req->ring_type = ring_type;
11131         req->fw_ring_id = cpu_to_le32(ring_id);
11132         resp = hwrm_req_hold(bp, req);
11133         rc = hwrm_req_send(bp, req);
11134         if (!rc) {
11135                 *prod = le32_to_cpu(resp->producer_index);
11136                 *cons = le32_to_cpu(resp->consumer_index);
11137         }
11138         hwrm_req_drop(bp, req);
11139         return rc;
11140 }
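/* Usage sketch (illustrative only): fetch the firmware's view of a
 * completion ring's indices, as bnxt_chk_missed_irq() does further
 * down:
 *
 *	u32 prod, cons;
 *
 *	if (!bnxt_dbg_hwrm_ring_info_get(bp,
 *			DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
 *			fw_ring_id, &prod, &cons))
 *		netdev_info(bp->dev, "fw prod %x cons %x\n", prod, cons);
 */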
11141
11142 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11143 {
11144         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11145         int i = bnapi->index;
11146
11147         if (!txr)
11148                 return;
11149
11150         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11151                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11152                     txr->tx_cons);
11153 }
11154
11155 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11156 {
11157         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11158         int i = bnapi->index;
11159
11160         if (!rxr)
11161                 return;
11162
11163         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11164                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11165                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11166                     rxr->rx_sw_agg_prod);
11167 }
11168
11169 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11170 {
11171         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11172         int i = bnapi->index;
11173
11174         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11175                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11176 }
11177
11178 static void bnxt_dbg_dump_states(struct bnxt *bp)
11179 {
11180         int i;
11181         struct bnxt_napi *bnapi;
11182
11183         for (i = 0; i < bp->cp_nr_rings; i++) {
11184                 bnapi = bp->bnapi[i];
11185                 if (netif_msg_drv(bp)) {
11186                         bnxt_dump_tx_sw_state(bnapi);
11187                         bnxt_dump_rx_sw_state(bnapi);
11188                         bnxt_dump_cp_sw_state(bnapi);
11189                 }
11190         }
11191 }
11192
11193 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11194 {
11195         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11196         struct hwrm_ring_reset_input *req;
11197         struct bnxt_napi *bnapi = rxr->bnapi;
11198         struct bnxt_cp_ring_info *cpr;
11199         u16 cp_ring_id;
11200         int rc;
11201
11202         rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11203         if (rc)
11204                 return rc;
11205
11206         cpr = &bnapi->cp_ring;
11207         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11208         req->cmpl_ring = cpu_to_le16(cp_ring_id);
11209         req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11210         req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11211         return hwrm_req_send_silent(bp, req);
11212 }
11213
11214 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11215 {
11216         if (!silent)
11217                 bnxt_dbg_dump_states(bp);
11218         if (netif_running(bp->dev)) {
11219                 int rc;
11220
11221                 if (silent) {
11222                         bnxt_close_nic(bp, false, false);
11223                         bnxt_open_nic(bp, false, false);
11224                 } else {
11225                         bnxt_ulp_stop(bp);
11226                         bnxt_close_nic(bp, true, false);
11227                         rc = bnxt_open_nic(bp, true, false);
11228                         bnxt_ulp_start(bp, rc);
11229                 }
11230         }
11231 }
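/* Note: a "silent" reset skips the state dump and the ULP stop/start
 * bracketing above, e.g. when used as the fallback in
 * bnxt_rx_ring_reset() below.
 */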
11232
11233 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11234 {
11235         struct bnxt *bp = netdev_priv(dev);
11236
11237         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11238         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11239         bnxt_queue_sp_work(bp);
11240 }
11241
11242 static void bnxt_fw_health_check(struct bnxt *bp)
11243 {
11244         struct bnxt_fw_health *fw_health = bp->fw_health;
11245         u32 val;
11246
11247         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11248                 return;
11249
11250         if (fw_health->tmr_counter) {
11251                 fw_health->tmr_counter--;
11252                 return;
11253         }
11254
11255         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11256         if (val == fw_health->last_fw_heartbeat)
11257                 goto fw_reset;
11258
11259         fw_health->last_fw_heartbeat = val;
11260
11261         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11262         if (val != fw_health->last_fw_reset_cnt)
11263                 goto fw_reset;
11264
11265         fw_health->tmr_counter = fw_health->tmr_multiplier;
11266         return;
11267
11268 fw_reset:
11269         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11270         bnxt_queue_sp_work(bp);
11271 }
11272
11273 static void bnxt_timer(struct timer_list *t)
11274 {
11275         struct bnxt *bp = from_timer(bp, t, timer);
11276         struct net_device *dev = bp->dev;
11277
11278         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11279                 return;
11280
11281         if (atomic_read(&bp->intr_sem) != 0)
11282                 goto bnxt_restart_timer;
11283
11284         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11285                 bnxt_fw_health_check(bp);
11286
11287         if (bp->link_info.link_up && bp->stats_coal_ticks) {
11288                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11289                 bnxt_queue_sp_work(bp);
11290         }
11291
11292         if (bnxt_tc_flower_enabled(bp)) {
11293                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11294                 bnxt_queue_sp_work(bp);
11295         }
11296
11297 #ifdef CONFIG_RFS_ACCEL
11298         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11299                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11300                 bnxt_queue_sp_work(bp);
11301         }
11302 #endif /*CONFIG_RFS_ACCEL*/
11303
11304         if (bp->link_info.phy_retry) {
11305                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11306                         bp->link_info.phy_retry = false;
11307                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11308                 } else {
11309                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11310                         bnxt_queue_sp_work(bp);
11311                 }
11312         }
11313
11314         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11315             netif_carrier_ok(dev)) {
11316                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11317                 bnxt_queue_sp_work(bp);
11318         }
11319 bnxt_restart_timer:
11320         mod_timer(&bp->timer, jiffies + bp->current_interval);
11321 }
11322
11323 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11324 {
11325         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11326          * set.  If the device is being closed, bnxt_close() may be holding
11327          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
11328          * So we must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
11329          */
11330         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11331         rtnl_lock();
11332 }
11333
11334 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11335 {
11336         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11337         rtnl_unlock();
11338 }
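/* Usage sketch (do_work_under_rtnl() is a hypothetical helper): sp_task
 * paths bracket their rtnl sections with the pair above so that
 * bnxt_close() cannot deadlock waiting on BNXT_STATE_IN_SP_TASK:
 *
 *	bnxt_rtnl_lock_sp(bp);
 *	if (test_bit(BNXT_STATE_OPEN, &bp->state))
 *		do_work_under_rtnl(bp);
 *	bnxt_rtnl_unlock_sp(bp);
 */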
11339
11340 /* Only called from bnxt_sp_task() */
11341 static void bnxt_reset(struct bnxt *bp, bool silent)
11342 {
11343         bnxt_rtnl_lock_sp(bp);
11344         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11345                 bnxt_reset_task(bp, silent);
11346         bnxt_rtnl_unlock_sp(bp);
11347 }
11348
11349 /* Only called from bnxt_sp_task() */
11350 static void bnxt_rx_ring_reset(struct bnxt *bp)
11351 {
11352         int i;
11353
11354         bnxt_rtnl_lock_sp(bp);
11355         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11356                 bnxt_rtnl_unlock_sp(bp);
11357                 return;
11358         }
11359         /* Disable and flush TPA before resetting the RX ring */
11360         if (bp->flags & BNXT_FLAG_TPA)
11361                 bnxt_set_tpa(bp, false);
11362         for (i = 0; i < bp->rx_nr_rings; i++) {
11363                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11364                 struct bnxt_cp_ring_info *cpr;
11365                 int rc;
11366
11367                 if (!rxr->bnapi->in_reset)
11368                         continue;
11369
11370                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11371                 if (rc) {
11372                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11373                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11374                         else
11375                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11376                                             rc);
11377                         bnxt_reset_task(bp, true);
11378                         break;
11379                 }
11380                 bnxt_free_one_rx_ring_skbs(bp, i);
11381                 rxr->rx_prod = 0;
11382                 rxr->rx_agg_prod = 0;
11383                 rxr->rx_sw_agg_prod = 0;
11384                 rxr->rx_next_cons = 0;
11385                 rxr->bnapi->in_reset = false;
11386                 bnxt_alloc_one_rx_ring(bp, i);
11387                 cpr = &rxr->bnapi->cp_ring;
11388                 cpr->sw_stats.rx.rx_resets++;
11389                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11390                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11391                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11392         }
11393         if (bp->flags & BNXT_FLAG_TPA)
11394                 bnxt_set_tpa(bp, true);
11395         bnxt_rtnl_unlock_sp(bp);
11396 }
11397
11398 static void bnxt_fw_reset_close(struct bnxt *bp)
11399 {
11400         bnxt_ulp_stop(bp);
11401         /* When firmware is in fatal state, quiesce device and disable
11402          * bus master to prevent any potential bad DMAs before freeing
11403          * kernel memory.
11404          */
11405         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11406                 u16 val = 0;
11407
11408                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11409                 if (val == 0xffff)
11410                         bp->fw_reset_min_dsecs = 0;
11411                 bnxt_tx_disable(bp);
11412                 bnxt_disable_napi(bp);
11413                 bnxt_disable_int_sync(bp);
11414                 bnxt_free_irq(bp);
11415                 bnxt_clear_int_mode(bp);
11416                 pci_disable_device(bp->pdev);
11417         }
11418         __bnxt_close_nic(bp, true, false);
11419         bnxt_vf_reps_free(bp);
11420         bnxt_clear_int_mode(bp);
11421         bnxt_hwrm_func_drv_unrgtr(bp);
11422         if (pci_is_enabled(bp->pdev))
11423                 pci_disable_device(bp->pdev);
11424         bnxt_free_ctx_mem(bp);
11425         kfree(bp->ctx);
11426         bp->ctx = NULL;
11427 }
11428
11429 static bool is_bnxt_fw_ok(struct bnxt *bp)
11430 {
11431         struct bnxt_fw_health *fw_health = bp->fw_health;
11432         bool no_heartbeat = false, has_reset = false;
11433         u32 val;
11434
11435         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11436         if (val == fw_health->last_fw_heartbeat)
11437                 no_heartbeat = true;
11438
11439         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11440         if (val != fw_health->last_fw_reset_cnt)
11441                 has_reset = true;
11442
11443         if (!no_heartbeat && has_reset)
11444                 return true;
11445
11446         return false;
11447 }
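/* Summary: firmware is reported OK only when the heartbeat is still
 * advancing AND the reset counter has moved, i.e. the firmware went
 * through its exception handling and came back:
 *
 *	heartbeat advancing	reset count changed	is_bnxt_fw_ok()
 *	yes			yes			true
 *	any other combination				false
 */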
11448
11449 /* rtnl_lock is acquired before calling this function */
11450 static void bnxt_force_fw_reset(struct bnxt *bp)
11451 {
11452         struct bnxt_fw_health *fw_health = bp->fw_health;
11453         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11454         u32 wait_dsecs;
11455
11456         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11457             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11458                 return;
11459
11460         if (ptp) {
11461                 spin_lock_bh(&ptp->ptp_lock);
11462                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11463                 spin_unlock_bh(&ptp->ptp_lock);
11464         } else {
11465                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11466         }
11467         bnxt_fw_reset_close(bp);
11468         wait_dsecs = fw_health->master_func_wait_dsecs;
11469         if (fw_health->master) {
11470                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11471                         wait_dsecs = 0;
11472                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11473         } else {
11474                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11475                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11476                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11477         }
11478
11479         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11480         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11481         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11482 }
11483
11484 void bnxt_fw_exception(struct bnxt *bp)
11485 {
11486         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11487         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11488         bnxt_rtnl_lock_sp(bp);
11489         bnxt_force_fw_reset(bp);
11490         bnxt_rtnl_unlock_sp(bp);
11491 }
11492
11493 /* Returns the number of registered VFs, or 1 if VF configuration is
11494  * pending, or < 0 on error.
11495  */
11496 static int bnxt_get_registered_vfs(struct bnxt *bp)
11497 {
11498 #ifdef CONFIG_BNXT_SRIOV
11499         int rc;
11500
11501         if (!BNXT_PF(bp))
11502                 return 0;
11503
11504         rc = bnxt_hwrm_func_qcfg(bp);
11505         if (rc) {
11506                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11507                 return rc;
11508         }
11509         if (bp->pf.registered_vfs)
11510                 return bp->pf.registered_vfs;
11511         if (bp->sriov_cfg)
11512                 return 1;
11513 #endif
11514         return 0;
11515 }
11516
11517 void bnxt_fw_reset(struct bnxt *bp)
11518 {
11519         bnxt_rtnl_lock_sp(bp);
11520         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11521             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11522                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11523                 int n = 0, tmo;
11524
11525                 if (ptp) {
11526                         spin_lock_bh(&ptp->ptp_lock);
11527                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11528                         spin_unlock_bh(&ptp->ptp_lock);
11529                 } else {
11530                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11531                 }
11532                 if (bp->pf.active_vfs &&
11533                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11534                         n = bnxt_get_registered_vfs(bp);
11535                 if (n < 0) {
11536                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11537                                    n);
11538                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11539                         dev_close(bp->dev);
11540                         goto fw_reset_exit;
11541                 } else if (n > 0) {
11542                         u16 vf_tmo_dsecs = n * 10;
11543
11544                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11545                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11546                         bp->fw_reset_state =
11547                                 BNXT_FW_RESET_STATE_POLL_VF;
11548                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11549                         goto fw_reset_exit;
11550                 }
11551                 bnxt_fw_reset_close(bp);
11552                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11553                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11554                         tmo = HZ / 10;
11555                 } else {
11556                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11557                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11558                 }
11559                 bnxt_queue_fw_reset_work(bp, tmo);
11560         }
11561 fw_reset_exit:
11562         bnxt_rtnl_unlock_sp(bp);
11563 }
11564
11565 static void bnxt_chk_missed_irq(struct bnxt *bp)
11566 {
11567         int i;
11568
11569         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11570                 return;
11571
11572         for (i = 0; i < bp->cp_nr_rings; i++) {
11573                 struct bnxt_napi *bnapi = bp->bnapi[i];
11574                 struct bnxt_cp_ring_info *cpr;
11575                 u32 fw_ring_id;
11576                 int j;
11577
11578                 if (!bnapi)
11579                         continue;
11580
11581                 cpr = &bnapi->cp_ring;
11582                 for (j = 0; j < 2; j++) {
11583                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11584                         u32 val[2];
11585
11586                         if (!cpr2 || cpr2->has_more_work ||
11587                             !bnxt_has_work(bp, cpr2))
11588                                 continue;
11589
11590                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11591                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11592                                 continue;
11593                         }
11594                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11595                         bnxt_dbg_hwrm_ring_info_get(bp,
11596                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11597                                 fw_ring_id, &val[0], &val[1]);
11598                         cpr->sw_stats.cmn.missed_irqs++;
11599                 }
11600         }
11601 }
11602
11603 static void bnxt_cfg_ntp_filters(struct bnxt *);
11604
11605 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11606 {
11607         struct bnxt_link_info *link_info = &bp->link_info;
11608
11609         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11610                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11611                 if (bp->hwrm_spec_code >= 0x10201) {
11612                         if (link_info->auto_pause_setting &
11613                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11614                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11615                 } else {
11616                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11617                 }
11618                 link_info->advertising = link_info->auto_link_speeds;
11619                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11620         } else {
11621                 link_info->req_link_speed = link_info->force_link_speed;
11622                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11623                 if (link_info->force_pam4_link_speed) {
11624                         link_info->req_link_speed =
11625                                 link_info->force_pam4_link_speed;
11626                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11627                 }
11628                 link_info->req_duplex = link_info->duplex_setting;
11629         }
11630         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11631                 link_info->req_flow_ctrl =
11632                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11633         else
11634                 link_info->req_flow_ctrl = link_info->force_pause_setting;
11635 }
11636
11637 static void bnxt_fw_echo_reply(struct bnxt *bp)
11638 {
11639         struct bnxt_fw_health *fw_health = bp->fw_health;
11640         struct hwrm_func_echo_response_input *req;
11641         int rc;
11642
11643         rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11644         if (rc)
11645                 return;
11646         req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11647         req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11648         hwrm_req_send(bp, req);
11649 }
11650
11651 static void bnxt_sp_task(struct work_struct *work)
11652 {
11653         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11654
11655         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11656         smp_mb__after_atomic();
11657         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11658                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11659                 return;
11660         }
11661
11662         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11663                 bnxt_cfg_rx_mode(bp);
11664
11665         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11666                 bnxt_cfg_ntp_filters(bp);
11667         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11668                 bnxt_hwrm_exec_fwd_req(bp);
11669         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11670                 bnxt_hwrm_port_qstats(bp, 0);
11671                 bnxt_hwrm_port_qstats_ext(bp, 0);
11672                 bnxt_accumulate_all_stats(bp);
11673         }
11674
11675         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11676                 int rc;
11677
11678                 mutex_lock(&bp->link_lock);
11679                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11680                                        &bp->sp_event))
11681                         bnxt_hwrm_phy_qcaps(bp);
11682
11683                 rc = bnxt_update_link(bp, true);
11684                 if (rc)
11685                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11686                                    rc);
11687
11688                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11689                                        &bp->sp_event))
11690                         bnxt_init_ethtool_link_settings(bp);
11691                 mutex_unlock(&bp->link_lock);
11692         }
11693         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11694                 int rc;
11695
11696                 mutex_lock(&bp->link_lock);
11697                 rc = bnxt_update_phy_setting(bp);
11698                 mutex_unlock(&bp->link_lock);
11699                 if (rc) {
11700                         netdev_warn(bp->dev, "update phy settings retry failed\n");
11701                 } else {
11702                         bp->link_info.phy_retry = false;
11703                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
11704                 }
11705         }
11706         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11707                 mutex_lock(&bp->link_lock);
11708                 bnxt_get_port_module_status(bp);
11709                 mutex_unlock(&bp->link_lock);
11710         }
11711
11712         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11713                 bnxt_tc_flow_stats_work(bp);
11714
11715         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11716                 bnxt_chk_missed_irq(bp);
11717
11718         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11719                 bnxt_fw_echo_reply(bp);
11720
11721         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
11722          * must be the last functions to be called before exiting.
11723          */
11724         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11725                 bnxt_reset(bp, false);
11726
11727         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11728                 bnxt_reset(bp, true);
11729
11730         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11731                 bnxt_rx_ring_reset(bp);
11732
11733         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11734                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11735
11736         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11737                 if (!is_bnxt_fw_ok(bp))
11738                         bnxt_devlink_health_report(bp,
11739                                                    BNXT_FW_EXCEPTION_SP_EVENT);
11740         }
11741
11742         smp_mb__before_atomic();
11743         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11744 }
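/* Usage sketch (illustrative only): work is handed to bnxt_sp_task() by
 * setting an event bit and queueing the work item, exactly as
 * bnxt_timer() does above:
 *
 *	set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
 *	bnxt_queue_sp_work(bp);
 *
 * The test_and_clear_bit() calls ensure each event fires at most once
 * per scheduled run.
 */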
11745
11746 /* Under rtnl_lock */
11747 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11748                      int tx_xdp)
11749 {
11750         int max_rx, max_tx, tx_sets = 1;
11751         int tx_rings_needed, stats;
11752         int rx_rings = rx;
11753         int cp, vnics, rc;
11754
11755         if (tcs)
11756                 tx_sets = tcs;
11757
11758         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11759         if (rc)
11760                 return rc;
11761
11762         if (max_rx < rx)
11763                 return -ENOMEM;
11764
11765         tx_rings_needed = tx * tx_sets + tx_xdp;
11766         if (max_tx < tx_rings_needed)
11767                 return -ENOMEM;
11768
11769         vnics = 1;
11770         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11771                 vnics += rx_rings;
11772
11773         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11774                 rx_rings <<= 1;
11775         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11776         stats = cp;
11777         if (BNXT_NEW_RM(bp)) {
11778                 cp += bnxt_get_ulp_msix_num(bp);
11779                 stats += bnxt_get_ulp_stat_ctxs(bp);
11780         }
11781         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11782                                      stats, vnics);
11783 }
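/* Worked example (assumed numbers, for illustration): tx = 4, rx = 4,
 * tcs = 2, tx_xdp = 0, shared completion rings, aggregation enabled:
 *
 *	tx_rings_needed = 4 * 2 + 0 = 8
 *	rx_rings        = 4 << 1   = 8	(agg rings double the RX count)
 *	cp              = max(8, 4) = 8	(sh: larger of tx rings and rx)
 *
 * plus ULP MSI-X vectors and stat contexts when BNXT_NEW_RM() firmware
 * manages the resources.
 */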
11784
11785 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11786 {
11787         if (bp->bar2) {
11788                 pci_iounmap(pdev, bp->bar2);
11789                 bp->bar2 = NULL;
11790         }
11791
11792         if (bp->bar1) {
11793                 pci_iounmap(pdev, bp->bar1);
11794                 bp->bar1 = NULL;
11795         }
11796
11797         if (bp->bar0) {
11798                 pci_iounmap(pdev, bp->bar0);
11799                 bp->bar0 = NULL;
11800         }
11801 }
11802
11803 static void bnxt_cleanup_pci(struct bnxt *bp)
11804 {
11805         bnxt_unmap_bars(bp, bp->pdev);
11806         pci_release_regions(bp->pdev);
11807         if (pci_is_enabled(bp->pdev))
11808                 pci_disable_device(bp->pdev);
11809 }
11810
11811 static void bnxt_init_dflt_coal(struct bnxt *bp)
11812 {
11813         struct bnxt_coal *coal;
11814
11815         /* Tick values in microseconds.
11816          * 1 coal_buf x bufs_per_record = 1 completion record.
11817          */
11818         coal = &bp->rx_coal;
11819         coal->coal_ticks = 10;
11820         coal->coal_bufs = 30;
11821         coal->coal_ticks_irq = 1;
11822         coal->coal_bufs_irq = 2;
11823         coal->idle_thresh = 50;
11824         coal->bufs_per_record = 2;
11825         coal->budget = 64;              /* NAPI budget */
11826
11827         coal = &bp->tx_coal;
11828         coal->coal_ticks = 28;
11829         coal->coal_bufs = 30;
11830         coal->coal_ticks_irq = 2;
11831         coal->coal_bufs_irq = 2;
11832         coal->bufs_per_record = 1;
11833
11834         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11835 }
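/* Example (derived from the defaults above; the exact hardware behavior
 * may differ): with bufs_per_record = 2, an rx coal_bufs of 30
 * corresponds to 15 completion records, so an RX interrupt is rated at
 * roughly one per 10 usec or per 15 completion records, whichever is
 * reached first.
 */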
11836
11837 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11838 {
11839         int rc;
11840
11841         bp->fw_cap = 0;
11842         rc = bnxt_hwrm_ver_get(bp);
11843         bnxt_try_map_fw_health_reg(bp);
11844         if (rc) {
11845                 rc = bnxt_try_recover_fw(bp);
11846                 if (rc)
11847                         return rc;
11848                 rc = bnxt_hwrm_ver_get(bp);
11849                 if (rc)
11850                         return rc;
11851         }
11852
11853         bnxt_nvm_cfg_ver_get(bp);
11854
11855         rc = bnxt_hwrm_func_reset(bp);
11856         if (rc)
11857                 return -ENODEV;
11858
11859         bnxt_hwrm_fw_set_time(bp);
11860         return 0;
11861 }
11862
11863 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11864 {
11865         int rc;
11866
11867         /* Get the MAX capabilities for this function */
11868         rc = bnxt_hwrm_func_qcaps(bp);
11869         if (rc) {
11870                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11871                            rc);
11872                 return -ENODEV;
11873         }
11874
11875         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11876         if (rc)
11877                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11878                             rc);
11879
11880         if (bnxt_alloc_fw_health(bp)) {
11881                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11882         } else {
11883                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11884                 if (rc)
11885                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11886                                     rc);
11887         }
11888
11889         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11890         if (rc)
11891                 return -ENODEV;
11892
11893         bnxt_hwrm_func_qcfg(bp);
11894         bnxt_hwrm_vnic_qcaps(bp);
11895         bnxt_hwrm_port_led_qcaps(bp);
11896         bnxt_ethtool_init(bp);
11897         bnxt_dcb_init(bp);
11898         return 0;
11899 }
11900
11901 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11902 {
11903         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11904         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11905                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11906                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11907                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11908         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11909                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11910                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11911                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11912         }
11913 }
11914
11915 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11916 {
11917         struct net_device *dev = bp->dev;
11918
11919         dev->hw_features &= ~NETIF_F_NTUPLE;
11920         dev->features &= ~NETIF_F_NTUPLE;
11921         bp->flags &= ~BNXT_FLAG_RFS;
11922         if (bnxt_rfs_supported(bp)) {
11923                 dev->hw_features |= NETIF_F_NTUPLE;
11924                 if (bnxt_rfs_capable(bp)) {
11925                         bp->flags |= BNXT_FLAG_RFS;
11926                         dev->features |= NETIF_F_NTUPLE;
11927                 }
11928         }
11929 }
11930
11931 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11932 {
11933         struct pci_dev *pdev = bp->pdev;
11934
11935         bnxt_set_dflt_rss_hash_type(bp);
11936         bnxt_set_dflt_rfs(bp);
11937
11938         bnxt_get_wol_settings(bp);
11939         if (bp->flags & BNXT_FLAG_WOL_CAP)
11940                 device_set_wakeup_enable(&pdev->dev, bp->wol);
11941         else
11942                 device_set_wakeup_capable(&pdev->dev, false);
11943
11944         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11945         bnxt_hwrm_coal_params_qcaps(bp);
11946 }
11947
11948 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11949
11950 static int bnxt_fw_init_one(struct bnxt *bp)
11951 {
11952         int rc;
11953
11954         rc = bnxt_fw_init_one_p1(bp);
11955         if (rc) {
11956                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11957                 return rc;
11958         }
11959         rc = bnxt_fw_init_one_p2(bp);
11960         if (rc) {
11961                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11962                 return rc;
11963         }
11964         rc = bnxt_probe_phy(bp, false);
11965         if (rc)
11966                 return rc;
11967         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11968         if (rc)
11969                 return rc;
11970
11971         /* In case fw capabilities have changed, destroy reporters that
11972          * are no longer needed and create the ones now supported.
11973          */
11974         bnxt_dl_fw_reporters_destroy(bp, false);
11975         bnxt_dl_fw_reporters_create(bp);
11976         bnxt_fw_init_one_p3(bp);
11977         return 0;
11978 }
11979
11980 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11981 {
11982         struct bnxt_fw_health *fw_health = bp->fw_health;
11983         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11984         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11985         u32 reg_type, reg_off, delay_msecs;
11986
11987         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11988         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11989         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11990         switch (reg_type) {
11991         case BNXT_FW_HEALTH_REG_TYPE_CFG:
11992                 pci_write_config_dword(bp->pdev, reg_off, val);
11993                 break;
11994         case BNXT_FW_HEALTH_REG_TYPE_GRC:
11995                 writel(reg_off & BNXT_GRC_BASE_MASK,
11996                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11997                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11998                 fallthrough;
11999         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12000                 writel(val, bp->bar0 + reg_off);
12001                 break;
12002         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12003                 writel(val, bp->bar1 + reg_off);
12004                 break;
12005         }
12006         if (delay_msecs) {
12007                 pci_read_config_dword(bp->pdev, 0, &val);
12008                 msleep(delay_msecs);
12009         }
12010 }
12011
12012 static void bnxt_reset_all(struct bnxt *bp)
12013 {
12014         struct bnxt_fw_health *fw_health = bp->fw_health;
12015         int i, rc;
12016
12017         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12018                 bnxt_fw_reset_via_optee(bp);
12019                 bp->fw_reset_timestamp = jiffies;
12020                 return;
12021         }
12022
12023         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12024                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12025                         bnxt_fw_reset_writel(bp, i);
12026         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12027                 struct hwrm_fw_reset_input *req;
12028
12029                 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12030                 if (!rc) {
12031                         req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12032                         req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12033                         req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12034                         req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12035                         rc = hwrm_req_send(bp, req);
12036                 }
12037                 if (rc != -ENODEV)
12038                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12039         }
12040         bp->fw_reset_timestamp = jiffies;
12041 }
12042
12043 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12044 {
12045         return time_after(jiffies, bp->fw_reset_timestamp +
12046                           (bp->fw_reset_max_dsecs * HZ / 10));
12047 }
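/* Example: the *_dsecs fields are in deciseconds, so a
 * fw_reset_max_dsecs of 60 gives 60 * HZ / 10 = 6 * HZ jiffies, i.e. a
 * 6 second window measured from fw_reset_timestamp.
 */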
12048
12049 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12050 {
12051         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12052         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12053                 bnxt_ulp_start(bp, rc);
12054                 bnxt_dl_health_status_update(bp, false);
12055         }
12056         bp->fw_reset_state = 0;
12057         dev_close(bp->dev);
12058 }
12059
12060 static void bnxt_fw_reset_task(struct work_struct *work)
12061 {
12062         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12063         int rc = 0;
12064
12065         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12066                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12067                 return;
12068         }
12069
12070         switch (bp->fw_reset_state) {
12071         case BNXT_FW_RESET_STATE_POLL_VF: {
12072                 int n = bnxt_get_registered_vfs(bp);
12073                 int tmo;
12074
12075                 if (n < 0) {
12076                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12077                                    n, jiffies_to_msecs(jiffies -
12078                                    bp->fw_reset_timestamp));
12079                         goto fw_reset_abort;
12080                 } else if (n > 0) {
12081                         if (bnxt_fw_reset_timeout(bp)) {
12082                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12083                                 bp->fw_reset_state = 0;
12084                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12085                                            n);
12086                                 return;
12087                         }
12088                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12089                         return;
12090                 }
12091                 bp->fw_reset_timestamp = jiffies;
12092                 rtnl_lock();
12093                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12094                         bnxt_fw_reset_abort(bp, rc);
12095                         rtnl_unlock();
12096                         return;
12097                 }
12098                 bnxt_fw_reset_close(bp);
12099                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12100                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12101                         tmo = HZ / 10;
12102                 } else {
12103                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12104                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12105                 }
12106                 rtnl_unlock();
12107                 bnxt_queue_fw_reset_work(bp, tmo);
12108                 return;
12109         }
12110         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12111                 u32 val;
12112
12113                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12114                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12115                     !bnxt_fw_reset_timeout(bp)) {
12116                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12117                         return;
12118                 }
12119
12120                 if (!bp->fw_health->master) {
12121                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12122
12123                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12124                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12125                         return;
12126                 }
12127                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12128         }
12129                 fallthrough;
12130         case BNXT_FW_RESET_STATE_RESET_FW:
12131                 bnxt_reset_all(bp);
12132                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12133                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12134                 return;
12135         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12136                 bnxt_inv_fw_health_reg(bp);
12137                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12138                     !bp->fw_reset_min_dsecs) {
12139                         u16 val;
12140
12141                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12142                         if (val == 0xffff) {
12143                                 if (bnxt_fw_reset_timeout(bp)) {
12144                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12145                                         rc = -ETIMEDOUT;
12146                                         goto fw_reset_abort;
12147                                 }
12148                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12149                                 return;
12150                         }
12151                 }
12152                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12153                 if (pci_enable_device(bp->pdev)) {
12154                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12155                         rc = -ENODEV;
12156                         goto fw_reset_abort;
12157                 }
12158                 pci_set_master(bp->pdev);
12159                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12160                 fallthrough;
12161         case BNXT_FW_RESET_STATE_POLL_FW:
12162                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12163                 rc = bnxt_hwrm_poll(bp);
12164                 if (rc) {
12165                         if (bnxt_fw_reset_timeout(bp)) {
12166                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12167                                 goto fw_reset_abort_status;
12168                         }
12169                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12170                         return;
12171                 }
12172                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12173                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12174                 fallthrough;
12175         case BNXT_FW_RESET_STATE_OPENING:
12176                 while (!rtnl_trylock()) {
12177                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12178                         return;
12179                 }
12180                 rc = bnxt_open(bp->dev);
12181                 if (rc) {
12182                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12183                         bnxt_fw_reset_abort(bp, rc);
12184                         rtnl_unlock();
12185                         return;
12186                 }
12187
12188                 bp->fw_reset_state = 0;
12189                 /* Make sure fw_reset_state is 0 before clearing the flag */
12190                 smp_mb__before_atomic();
12191                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12192                 bnxt_ulp_start(bp, 0);
12193                 bnxt_reenable_sriov(bp);
12194                 bnxt_vf_reps_alloc(bp);
12195                 bnxt_vf_reps_open(bp);
12196                 bnxt_ptp_reapply_pps(bp);
12197                 bnxt_dl_health_recovery_done(bp);
12198                 bnxt_dl_health_status_update(bp, true);
12199                 rtnl_unlock();
12200                 break;
12201         }
12202         return;
12203
12204 fw_reset_abort_status:
12205         if (bp->fw_health->status_reliable ||
12206             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12207                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12208
12209                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12210         }
12211 fw_reset_abort:
12212         rtnl_lock();
12213         bnxt_fw_reset_abort(bp, rc);
12214         rtnl_unlock();
12215 }
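/* State machine overview (informational): a firmware reset normally
 * walks POLL_VF, then POLL_FW_DOWN and/or RESET_FW depending on the
 * recovery capabilities, then ENABLE_DEV -> POLL_FW -> OPENING,
 * re-queueing itself with bnxt_queue_fw_reset_work() until a step
 * completes or bnxt_fw_reset_timeout() forces an abort.
 */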
12216
12217 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12218 {
12219         int rc;
12220         struct bnxt *bp = netdev_priv(dev);
12221
12222         SET_NETDEV_DEV(dev, &pdev->dev);
12223
12224         /* enable device (incl. PCI PM wakeup) and bus-mastering */
12225         rc = pci_enable_device(pdev);
12226         if (rc) {
12227                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12228                 goto init_err;
12229         }
12230
12231         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12232                 dev_err(&pdev->dev,
12233                         "Cannot find PCI device base address, aborting\n");
12234                 rc = -ENODEV;
12235                 goto init_err_disable;
12236         }
12237
12238         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12239         if (rc) {
12240                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12241                 goto init_err_disable;
12242         }
12243
12244         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12245             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12246                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12247                 rc = -EIO;
12248                 goto init_err_release;
12249         }
12250
12251         pci_set_master(pdev);
12252
12253         bp->dev = dev;
12254         bp->pdev = pdev;
12255
12256         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12257          * determines the BAR size.
12258          */
12259         bp->bar0 = pci_ioremap_bar(pdev, 0);
12260         if (!bp->bar0) {
12261                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12262                 rc = -ENOMEM;
12263                 goto init_err_release;
12264         }
12265
12266         bp->bar2 = pci_ioremap_bar(pdev, 4);
12267         if (!bp->bar2) {
12268                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12269                 rc = -ENOMEM;
12270                 goto init_err_release;
12271         }
12272
12273         pci_enable_pcie_error_reporting(pdev);
12274
12275         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12276         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12277
12278         spin_lock_init(&bp->ntp_fltr_lock);
12279 #if BITS_PER_LONG == 32
12280         spin_lock_init(&bp->db_lock);
12281 #endif
12282
12283         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12284         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12285
12286         bnxt_init_dflt_coal(bp);
12287
12288         timer_setup(&bp->timer, bnxt_timer, 0);
12289         bp->current_interval = BNXT_TIMER_INTERVAL;
12290
12291         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12292         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12293
12294         clear_bit(BNXT_STATE_OPEN, &bp->state);
12295         return 0;
12296
12297 init_err_release:
12298         bnxt_unmap_bars(bp, pdev);
12299         pci_release_regions(pdev);
12300
12301 init_err_disable:
12302         pci_disable_device(pdev);
12303
12304 init_err:
12305         return rc;
12306 }
12307
12308 /* rtnl_lock held */
12309 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12310 {
12311         struct sockaddr *addr = p;
12312         struct bnxt *bp = netdev_priv(dev);
12313         int rc = 0;
12314
12315         if (!is_valid_ether_addr(addr->sa_data))
12316                 return -EADDRNOTAVAIL;
12317
12318         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12319                 return 0;
12320
12321         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12322         if (rc)
12323                 return rc;
12324
12325         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12326         if (netif_running(dev)) {
12327                 bnxt_close_nic(bp, false, false);
12328                 rc = bnxt_open_nic(bp, false, false);
12329         }
12330
12331         return rc;
12332 }
12333
12334 /* rtnl_lock held */
12335 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12336 {
12337         struct bnxt *bp = netdev_priv(dev);
12338
12339         if (netif_running(dev))
12340                 bnxt_close_nic(bp, true, false);
12341
12342         dev->mtu = new_mtu;
12343         bnxt_set_ring_params(bp);
12344
12345         if (netif_running(dev))
12346                 return bnxt_open_nic(bp, true, false);
12347
12348         return 0;
12349 }
12350
12351 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12352 {
12353         struct bnxt *bp = netdev_priv(dev);
12354         bool sh = false;
12355         int rc;
12356
12357         if (tc > bp->max_tc) {
12358                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12359                            tc, bp->max_tc);
12360                 return -EINVAL;
12361         }
12362
12363         if (netdev_get_num_tc(dev) == tc)
12364                 return 0;
12365
12366         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12367                 sh = true;
12368
12369         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12370                               sh, tc, bp->tx_nr_rings_xdp);
12371         if (rc)
12372                 return rc;
12373
12374         /* Need to close the device and redo hw resource allocations */
12375         if (netif_running(bp->dev))
12376                 bnxt_close_nic(bp, true, false);
12377
12378         if (tc) {
12379                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12380                 netdev_set_num_tc(dev, tc);
12381         } else {
12382                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12383                 netdev_reset_tc(dev);
12384         }
12385         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12386         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12387                                bp->tx_nr_rings + bp->rx_nr_rings;
12388
12389         if (netif_running(bp->dev))
12390                 return bnxt_open_nic(bp, true, false);
12391
12392         return 0;
12393 }
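/* Example (assumed numbers): with tx_nr_rings_per_tc = 4 and tc = 2,
 * the device reopens with bp->tx_nr_rings = 8 (plus any XDP rings) and
 * netdev_set_num_tc() exposes the two traffic classes to the stack.
 */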
12394
12395 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12396                                   void *cb_priv)
12397 {
12398         struct bnxt *bp = cb_priv;
12399
12400         if (!bnxt_tc_flower_enabled(bp) ||
12401             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12402                 return -EOPNOTSUPP;
12403
12404         switch (type) {
12405         case TC_SETUP_CLSFLOWER:
12406                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12407         default:
12408                 return -EOPNOTSUPP;
12409         }
12410 }
12411
12412 LIST_HEAD(bnxt_block_cb_list);
12413
12414 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12415                          void *type_data)
12416 {
12417         struct bnxt *bp = netdev_priv(dev);
12418
12419         switch (type) {
12420         case TC_SETUP_BLOCK:
12421                 return flow_block_cb_setup_simple(type_data,
12422                                                   &bnxt_block_cb_list,
12423                                                   bnxt_setup_tc_block_cb,
12424                                                   bp, bp, true);
12425         case TC_SETUP_QDISC_MQPRIO: {
12426                 struct tc_mqprio_qopt *mqprio = type_data;
12427
12428                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12429
12430                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12431         }
12432         default:
12433                 return -EOPNOTSUPP;
12434         }
12435 }
12436
12437 #ifdef CONFIG_RFS_ACCEL
12438 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12439                             struct bnxt_ntuple_filter *f2)
12440 {
12441         struct flow_keys *keys1 = &f1->fkeys;
12442         struct flow_keys *keys2 = &f2->fkeys;
12443
12444         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12445             keys1->basic.ip_proto != keys2->basic.ip_proto)
12446                 return false;
12447
12448         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12449                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12450                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12451                         return false;
12452         } else {
12453                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12454                            sizeof(keys1->addrs.v6addrs.src)) ||
12455                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12456                            sizeof(keys1->addrs.v6addrs.dst)))
12457                         return false;
12458         }
12459
12460         if (keys1->ports.ports == keys2->ports.ports &&
12461             keys1->control.flags == keys2->control.flags &&
12462             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12463             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12464                 return true;
12465
12466         return false;
12467 }
12468
12469 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12470                               u16 rxq_index, u32 flow_id)
12471 {
12472         struct bnxt *bp = netdev_priv(dev);
12473         struct bnxt_ntuple_filter *fltr, *new_fltr;
12474         struct flow_keys *fkeys;
12475         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12476         int rc = 0, idx, bit_id, l2_idx = 0;
12477         struct hlist_head *head;
12478         u32 flags;
12479
12480         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12481                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12482                 int off = 0, j;
12483
12484                 netif_addr_lock_bh(dev);
12485                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12486                         if (ether_addr_equal(eth->h_dest,
12487                                              vnic->uc_list + off)) {
12488                                 l2_idx = j + 1;
12489                                 break;
12490                         }
12491                 }
12492                 netif_addr_unlock_bh(dev);
12493                 if (!l2_idx)
12494                         return -EINVAL;
12495         }
12496         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12497         if (!new_fltr)
12498                 return -ENOMEM;
12499
12500         fkeys = &new_fltr->fkeys;
12501         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12502                 rc = -EPROTONOSUPPORT;
12503                 goto err_free;
12504         }
12505
12506         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12507              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12508             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12509              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12510                 rc = -EPROTONOSUPPORT;
12511                 goto err_free;
12512         }
12513         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12514             bp->hwrm_spec_code < 0x10601) {
12515                 rc = -EPROTONOSUPPORT;
12516                 goto err_free;
12517         }
12518         flags = fkeys->control.flags;
12519         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12520              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12521                 rc = -EPROTONOSUPPORT;
12522                 goto err_free;
12523         }
12524
12525         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12526         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12527
12528         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12529         head = &bp->ntp_fltr_hash_tbl[idx];
12530         rcu_read_lock();
12531         hlist_for_each_entry_rcu(fltr, head, hash) {
12532                 if (bnxt_fltr_match(fltr, new_fltr)) {
12533                         rcu_read_unlock();
12534                         rc = 0;
12535                         goto err_free;
12536                 }
12537         }
12538         rcu_read_unlock();
12539
12540         spin_lock_bh(&bp->ntp_fltr_lock);
12541         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12542                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12543         if (bit_id < 0) {
12544                 spin_unlock_bh(&bp->ntp_fltr_lock);
12545                 rc = -ENOMEM;
12546                 goto err_free;
12547         }
12548
12549         new_fltr->sw_id = (u16)bit_id;
12550         new_fltr->flow_id = flow_id;
12551         new_fltr->l2_fltr_idx = l2_idx;
12552         new_fltr->rxq = rxq_index;
12553         hlist_add_head_rcu(&new_fltr->hash, head);
12554         bp->ntp_fltr_count++;
12555         spin_unlock_bh(&bp->ntp_fltr_lock);
12556
12557         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12558         bnxt_queue_sp_work(bp);
12559
12560         return new_fltr->sw_id;
12561
12562 err_free:
12563         kfree(new_fltr);
12564         return rc;
12565 }
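/* Usage sketch (illustrative only, not part of the driver): the core
 * aRFS code calls this hook from the receive path and records the
 * returned handle, later passing the same (flow_id, id) pair back to
 * the driver through rps_may_expire_flow() in bnxt_cfg_ntp_filters():
 *
 *	int id = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, rxq, flow_id);
 *	// id >= 0: driver-chosen filter handle (sw_id); id < 0: -errno
 */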
12566
12567 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12568 {
12569         int i;
12570
12571         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12572                 struct hlist_head *head;
12573                 struct hlist_node *tmp;
12574                 struct bnxt_ntuple_filter *fltr;
12575                 int rc;
12576
12577                 head = &bp->ntp_fltr_hash_tbl[i];
12578                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12579                         bool del = false;
12580
12581                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12582                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12583                                                         fltr->flow_id,
12584                                                         fltr->sw_id)) {
12585                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
12586                                                                          fltr);
12587                                         del = true;
12588                                 }
12589                         } else {
12590                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12591                                                                        fltr);
12592                                 if (rc)
12593                                         del = true;
12594                                 else
12595                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
12596                         }
12597
12598                         if (del) {
12599                                 spin_lock_bh(&bp->ntp_fltr_lock);
12600                                 hlist_del_rcu(&fltr->hash);
12601                                 bp->ntp_fltr_count--;
12602                                 spin_unlock_bh(&bp->ntp_fltr_lock);
12603                                 synchronize_rcu();
12604                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12605                                 kfree(fltr);
12606                         }
12607                 }
12608         }
12609         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12610                 netdev_info(bp->dev, "Received PF driver unload event!\n");
12611 }
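/* The deletion above follows the standard RCU pattern: unlink under the
 * spinlock with hlist_del_rcu(), wait out all existing lockless readers
 * (the hlist_for_each_entry_rcu() lookup in bnxt_rx_flow_steer()) with
 * synchronize_rcu(), and only then free the object.  A minimal sketch
 * of the same idiom:
 *
 *	spin_lock_bh(&lock);
 *	hlist_del_rcu(&obj->hash);
 *	spin_unlock_bh(&lock);
 *	synchronize_rcu();	// no reader can still hold a reference
 *	kfree(obj);
 */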
12612
12613 #else
12614
12615 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12616 {
12617 }
12618
12619 #endif /* CONFIG_RFS_ACCEL */
12620
12621 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12622 {
12623         struct bnxt *bp = netdev_priv(netdev);
12624         struct udp_tunnel_info ti;
12625         unsigned int cmd;
12626
12627         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12628         if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12629                 bp->vxlan_port = ti.port;
12630                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12631         } else {
12632                 bp->nge_port = ti.port;
12633                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12634         }
12635
12636         if (ti.port)
12637                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12638
12639         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12640 }
12641
12642 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12643         .sync_table     = bnxt_udp_tunnel_sync,
12644         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12645                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12646         .tables         = {
12647                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
12648                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12649         },
12650 };
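/* With this table registered via dev->udp_tunnel_nic_info in
 * bnxt_init_one(), the udp_tunnel_nic core tracks one VXLAN and one
 * GENEVE port and invokes bnxt_udp_tunnel_sync() whenever an entry
 * changes.  Illustrative read-back of table 0, entry 0 (hypothetical
 * values shown):
 *
 *	struct udp_tunnel_info ti;
 *
 *	udp_tunnel_nic_get_port(netdev, 0, 0, &ti);
 *	// ti.type == UDP_TUNNEL_TYPE_VXLAN; ti.port == htons(4789),
 *	// or 0 if the entry was vacated and should be freed in firmware
 */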
12651
12652 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12653                                struct net_device *dev, u32 filter_mask,
12654                                int nlflags)
12655 {
12656         struct bnxt *bp = netdev_priv(dev);
12657
12658         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12659                                        nlflags, filter_mask, NULL);
12660 }
12661
12662 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12663                                u16 flags, struct netlink_ext_ack *extack)
12664 {
12665         struct bnxt *bp = netdev_priv(dev);
12666         struct nlattr *attr, *br_spec;
12667         int rem, rc = 0;
12668
12669         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12670                 return -EOPNOTSUPP;
12671
12672         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12673         if (!br_spec)
12674                 return -EINVAL;
12675
12676         nla_for_each_nested(attr, br_spec, rem) {
12677                 u16 mode;
12678
12679                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12680                         continue;
12681
12682                 if (nla_len(attr) < sizeof(mode))
12683                         return -EINVAL;
12684
12685                 mode = nla_get_u16(attr);
12686                 if (mode == bp->br_mode)
12687                         break;
12688
12689                 rc = bnxt_hwrm_set_br_mode(bp, mode);
12690                 if (!rc)
12691                         bp->br_mode = mode;
12692                 break;
12693         }
12694         return rc;
12695 }
12696
12697 int bnxt_get_port_parent_id(struct net_device *dev,
12698                             struct netdev_phys_item_id *ppid)
12699 {
12700         struct bnxt *bp = netdev_priv(dev);
12701
12702         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12703                 return -EOPNOTSUPP;
12704
12705         /* The PF and its VF-reps only support the switchdev framework */
12706         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12707                 return -EOPNOTSUPP;
12708
12709         ppid->id_len = sizeof(bp->dsn);
12710         memcpy(ppid->id, bp->dsn, ppid->id_len);
12711
12712         return 0;
12713 }
12714
12715 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12716 {
12717         struct bnxt *bp = netdev_priv(dev);
12718
12719         return &bp->dl_port;
12720 }
12721
12722 static const struct net_device_ops bnxt_netdev_ops = {
12723         .ndo_open               = bnxt_open,
12724         .ndo_start_xmit         = bnxt_start_xmit,
12725         .ndo_stop               = bnxt_close,
12726         .ndo_get_stats64        = bnxt_get_stats64,
12727         .ndo_set_rx_mode        = bnxt_set_rx_mode,
12728         .ndo_eth_ioctl          = bnxt_ioctl,
12729         .ndo_validate_addr      = eth_validate_addr,
12730         .ndo_set_mac_address    = bnxt_change_mac_addr,
12731         .ndo_change_mtu         = bnxt_change_mtu,
12732         .ndo_fix_features       = bnxt_fix_features,
12733         .ndo_set_features       = bnxt_set_features,
12734         .ndo_features_check     = bnxt_features_check,
12735         .ndo_tx_timeout         = bnxt_tx_timeout,
12736 #ifdef CONFIG_BNXT_SRIOV
12737         .ndo_get_vf_config      = bnxt_get_vf_config,
12738         .ndo_set_vf_mac         = bnxt_set_vf_mac,
12739         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
12740         .ndo_set_vf_rate        = bnxt_set_vf_bw,
12741         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
12742         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
12743         .ndo_set_vf_trust       = bnxt_set_vf_trust,
12744 #endif
12745         .ndo_setup_tc           = bnxt_setup_tc,
12746 #ifdef CONFIG_RFS_ACCEL
12747         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
12748 #endif
12749         .ndo_bpf                = bnxt_xdp,
12750         .ndo_xdp_xmit           = bnxt_xdp_xmit,
12751         .ndo_bridge_getlink     = bnxt_bridge_getlink,
12752         .ndo_bridge_setlink     = bnxt_bridge_setlink,
12753         .ndo_get_devlink_port   = bnxt_get_devlink_port,
12754 };
12755
12756 static void bnxt_remove_one(struct pci_dev *pdev)
12757 {
12758         struct net_device *dev = pci_get_drvdata(pdev);
12759         struct bnxt *bp = netdev_priv(dev);
12760
12761         if (BNXT_PF(bp)) {
12762                 bnxt_sriov_disable(bp);
12763                 devlink_port_type_clear(&bp->dl_port);
12764         }
12766
12767         bnxt_ptp_clear(bp);
12768         pci_disable_pcie_error_reporting(pdev);
12769         unregister_netdev(dev);
12770         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12771         /* Flush any pending tasks */
12772         cancel_work_sync(&bp->sp_task);
12773         cancel_delayed_work_sync(&bp->fw_reset_task);
12774         bp->sp_event = 0;
12775
12776         bnxt_dl_fw_reporters_destroy(bp, true);
12777         bnxt_dl_unregister(bp);
12778         bnxt_shutdown_tc(bp);
12779
12780         bnxt_clear_int_mode(bp);
12781         bnxt_hwrm_func_drv_unrgtr(bp);
12782         bnxt_free_hwrm_resources(bp);
12783         bnxt_ethtool_free(bp);
12784         bnxt_dcb_free(bp);
12785         kfree(bp->edev);
12786         bp->edev = NULL;
12787         kfree(bp->ptp_cfg);
12788         bp->ptp_cfg = NULL;
12789         kfree(bp->fw_health);
12790         bp->fw_health = NULL;
12791         bnxt_cleanup_pci(bp);
12792         bnxt_free_ctx_mem(bp);
12793         kfree(bp->ctx);
12794         bp->ctx = NULL;
12795         kfree(bp->rss_indir_tbl);
12796         bp->rss_indir_tbl = NULL;
12797         bnxt_free_port_stats(bp);
12798         free_netdev(dev);
12799 }
12800
12801 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12802 {
12803         int rc = 0;
12804         struct bnxt_link_info *link_info = &bp->link_info;
12805
12806         bp->phy_flags = 0;
12807         rc = bnxt_hwrm_phy_qcaps(bp);
12808         if (rc) {
12809                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %d)\n",
12810                            rc);
12811                 return rc;
12812         }
12813         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12814                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12815         else
12816                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12817         if (!fw_dflt)
12818                 return 0;
12819
12820         mutex_lock(&bp->link_lock);
12821         rc = bnxt_update_link(bp, false);
12822         if (rc) {
12823                 mutex_unlock(&bp->link_lock);
12824                 netdev_err(bp->dev, "Probe phy can't update link (rc: %d)\n",
12825                            rc);
12826                 return rc;
12827         }
12828
12829         /* Older firmware does not have supported_auto_speeds, so assume
12830          * that all supported speeds can be autonegotiated.
12831          */
12832         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12833                 link_info->support_auto_speeds = link_info->support_speeds;
12834
12835         bnxt_init_ethtool_link_settings(bp);
12836         mutex_unlock(&bp->link_lock);
12837         return 0;
12838 }
12839
12840 static int bnxt_get_max_irq(struct pci_dev *pdev)
12841 {
12842         u16 ctrl;
12843
12844         if (!pdev->msix_cap)
12845                 return 1;
12846
12847         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12848         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12849 }
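/* PCI_MSIX_FLAGS_QSIZE is the low 11 bits of the MSI-X Message Control
 * word and encodes the table size minus one.  Worked example
 * (illustrative value):
 *
 *	ctrl = 0x003f  ->  (0x003f & 0x07ff) + 1 = 64 supported vectors
 *
 * A device without an MSI-X capability is assumed to provide a single
 * INTx/MSI vector, hence the early return of 1 above.
 */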
12850
12851 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12852                                 int *max_cp)
12853 {
12854         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12855         int max_ring_grps = 0, max_irq;
12856
12857         *max_tx = hw_resc->max_tx_rings;
12858         *max_rx = hw_resc->max_rx_rings;
12859         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12860         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12861                         bnxt_get_ulp_msix_num(bp),
12862                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12863         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12864                 *max_cp = min_t(int, *max_cp, max_irq);
12865         max_ring_grps = hw_resc->max_hw_ring_grps;
12866         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12867                 *max_cp -= 1;
12868                 *max_rx -= 2;
12869         }
12870         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12871                 *max_rx >>= 1;
12872         if (bp->flags & BNXT_FLAG_CHIP_P5) {
12873                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12874                 /* On P5 chips, max_cp output param is the number of available NQs */
12875                 *max_cp = max_irq;
12876         }
12877         *max_rx = min_t(int, *max_rx, max_ring_grps);
12878 }
12879
12880 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12881 {
12882         int rx, tx, cp;
12883
12884         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12885         *max_rx = rx;
12886         *max_tx = tx;
12887         if (!rx || !tx || !cp)
12888                 return -ENOMEM;
12889
12890         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12891 }
12892
12893 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12894                                bool shared)
12895 {
12896         int rc;
12897
12898         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12899         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12900                 /* Not enough rings, try disabling agg rings. */
12901                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12902                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12903                 if (rc) {
12904                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
12905                         bp->flags |= BNXT_FLAG_AGG_RINGS;
12906                         return rc;
12907                 }
12908                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12909                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12910                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12911                 bnxt_set_ring_params(bp);
12912         }
12913
12914         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12915                 int max_cp, max_stat, max_irq;
12916
12917                 /* Reserve minimum resources for RoCE */
12918                 max_cp = bnxt_get_max_func_cp_rings(bp);
12919                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12920                 max_irq = bnxt_get_max_func_irqs(bp);
12921                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12922                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12923                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12924                         return 0;
12925
12926                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12927                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12928                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12929                 max_cp = min_t(int, max_cp, max_irq);
12930                 max_cp = min_t(int, max_cp, max_stat);
12931                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12932                 if (rc)
12933                         rc = 0;
12934         }
12935         return rc;
12936 }
12937
12938 /* In the initial default shared ring setting, each shared ring must
12939  * have an RX/TX ring pair.
12940  */
12941 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12942 {
12943         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12944         bp->rx_nr_rings = bp->cp_nr_rings;
12945         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12946         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12947 }
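/* Worked example of the pairing rule above (illustrative numbers): with
 * rx_nr_rings = 8 and tx_nr_rings_per_tc = 4, cp_nr_rings becomes
 * min(4, 8) = 4 and both RX and TX are trimmed to 4, so every shared
 * completion ring serves exactly one RX/TX ring pair.
 */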
12948
12949 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12950 {
12951         int dflt_rings, max_rx_rings, max_tx_rings, rc;
12952
12953         if (!bnxt_can_reserve_rings(bp))
12954                 return 0;
12955
12956         if (sh)
12957                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12958         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12959         /* Reduce default rings on multi-port cards so that total default
12960          * rings do not exceed CPU count.
12961          */
12962         if (bp->port_count > 1) {
12963                 int max_rings =
12964                         max_t(int, num_online_cpus() / bp->port_count, 1);
12965
12966                 dflt_rings = min_t(int, dflt_rings, max_rings);
12967         }
12968         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12969         if (rc)
12970                 return rc;
12971         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12972         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12973         if (sh)
12974                 bnxt_trim_dflt_sh_rings(bp);
12975         else
12976                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12977         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12978
12979         rc = __bnxt_reserve_rings(bp);
12980         if (rc)
12981                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12982         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12983         if (sh)
12984                 bnxt_trim_dflt_sh_rings(bp);
12985
12986         /* Rings may have been trimmed, re-reserve the trimmed rings. */
12987         if (bnxt_need_reserve_rings(bp)) {
12988                 rc = __bnxt_reserve_rings(bp);
12989                 if (rc)
12990                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12991                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12992         }
12993         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12994                 bp->rx_nr_rings++;
12995                 bp->cp_nr_rings++;
12996         }
12997         if (rc) {
12998                 bp->tx_nr_rings = 0;
12999                 bp->rx_nr_rings = 0;
13000         }
13001         return rc;
13002 }
13003
13004 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13005 {
13006         int rc;
13007
13008         if (bp->tx_nr_rings)
13009                 return 0;
13010
13011         bnxt_ulp_irq_stop(bp);
13012         bnxt_clear_int_mode(bp);
13013         rc = bnxt_set_dflt_rings(bp, true);
13014         if (rc) {
13015                 netdev_err(bp->dev, "Not enough rings available.\n");
13016                 goto init_dflt_ring_err;
13017         }
13018         rc = bnxt_init_int_mode(bp);
13019         if (rc)
13020                 goto init_dflt_ring_err;
13021
13022         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13023         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13024                 bp->flags |= BNXT_FLAG_RFS;
13025                 bp->dev->features |= NETIF_F_NTUPLE;
13026         }
13027 init_dflt_ring_err:
13028         bnxt_ulp_irq_restart(bp, rc);
13029         return rc;
13030 }
13031
13032 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13033 {
13034         int rc;
13035
13036         ASSERT_RTNL();
13037         bnxt_hwrm_func_qcaps(bp);
13038
13039         if (netif_running(bp->dev))
13040                 __bnxt_close_nic(bp, true, false);
13041
13042         bnxt_ulp_irq_stop(bp);
13043         bnxt_clear_int_mode(bp);
13044         rc = bnxt_init_int_mode(bp);
13045         bnxt_ulp_irq_restart(bp, rc);
13046
13047         if (netif_running(bp->dev)) {
13048                 if (rc)
13049                         dev_close(bp->dev);
13050                 else
13051                         rc = bnxt_open_nic(bp, true, false);
13052         }
13053
13054         return rc;
13055 }
13056
13057 static int bnxt_init_mac_addr(struct bnxt *bp)
13058 {
13059         int rc = 0;
13060
13061         if (BNXT_PF(bp)) {
13062                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13063         } else {
13064 #ifdef CONFIG_BNXT_SRIOV
13065                 struct bnxt_vf_info *vf = &bp->vf;
13066                 bool strict_approval = true;
13067
13068                 if (is_valid_ether_addr(vf->mac_addr)) {
13069                         /* overwrite netdev dev_addr with admin VF MAC */
13070                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13071                         /* Older PF driver or firmware may not approve this
13072                          * correctly.
13073                          */
13074                         strict_approval = false;
13075                 } else {
13076                         eth_hw_addr_random(bp->dev);
13077                 }
13078                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13079 #endif
13080         }
13081         return rc;
13082 }
13083
13084 #define BNXT_VPD_LEN    512
13085 static void bnxt_vpd_read_info(struct bnxt *bp)
13086 {
13087         struct pci_dev *pdev = bp->pdev;
13088         int i, len, pos, ro_size, size;
13089         ssize_t vpd_size;
13090         u8 *vpd_data;
13091
13092         vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13093         if (!vpd_data)
13094                 return;
13095
13096         vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13097         if (vpd_size <= 0) {
13098                 netdev_err(bp->dev, "Unable to read VPD\n");
13099                 goto exit;
13100         }
13101
13102         i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
13103         if (i < 0) {
13104                 netdev_err(bp->dev, "VPD READ-Only not found\n");
13105                 goto exit;
13106         }
13113
13114         ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13115         i += PCI_VPD_LRDT_TAG_SIZE;
13116         if (i + ro_size > vpd_size)
13117                 goto exit;
13118
13119         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13120                                         PCI_VPD_RO_KEYWORD_PARTNO);
13121         if (pos < 0)
13122                 goto read_sn;
13123
13124         len = pci_vpd_info_field_size(&vpd_data[pos]);
13125         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13126         if (len + pos > vpd_size)
13127                 goto read_sn;
13128
13129         size = min(len, BNXT_VPD_FLD_LEN - 1);
13130         memcpy(bp->board_partno, &vpd_data[pos], size);
13131
13132 read_sn:
13133         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13134                                         PCI_VPD_RO_KEYWORD_SERIALNO);
13135         if (pos < 0)
13136                 goto exit;
13137
13138         len = pci_vpd_info_field_size(&vpd_data[pos]);
13139         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13140         if (len + pos > vpd_size)
13141                 goto exit;
13142
13143         size = min(len, BNXT_VPD_FLD_LEN - 1);
13144         memcpy(bp->board_serialno, &vpd_data[pos], size);
13145 exit:
13146         kfree(vpd_data);
13147 }
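/* The read-only VPD section walked above is a small TLV list.  Layout
 * sketch (field contents illustrative):
 *
 *	0x90 len_lo len_hi              // PCI_VPD_LRDT_RO_DATA header
 *	'P' 'N' len <part number>       // PCI_VPD_RO_KEYWORD_PARTNO
 *	'S' 'N' len <serial number>     // PCI_VPD_RO_KEYWORD_SERIALNO
 *	'R' 'V' 0x01 <csum>             // checksum, terminates the list
 *
 * pci_vpd_find_tag() locates the 0x90 header, pci_vpd_lrdt_size()
 * decodes its 16-bit little-endian length, and
 * pci_vpd_find_info_keyword() scans the keyword fields within it.
 */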
13148
13149 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13150 {
13151         struct pci_dev *pdev = bp->pdev;
13152         u64 qword;
13153
13154         qword = pci_get_dsn(pdev);
13155         if (!qword) {
13156                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13157                 return -EOPNOTSUPP;
13158         }
13159
13160         put_unaligned_le64(qword, dsn);
13161
13162         bp->flags |= BNXT_FLAG_DSN_VALID;
13163         return 0;
13164 }
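/* pci_get_dsn() returns the 64-bit PCIe Device Serial Number capability
 * as a CPU-order u64; put_unaligned_le64() stores it little-endian, so
 * dsn[0] ends up holding the least significant byte.  Example with a
 * hypothetical value:
 *
 *	qword = 0x0000ca0304050607  ->  dsn[] = 07 06 05 04 03 ca 00 00
 */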
13165
13166 static int bnxt_map_db_bar(struct bnxt *bp)
13167 {
13168         if (!bp->db_size)
13169                 return -ENODEV;
13170         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13171         if (!bp->bar1)
13172                 return -ENOMEM;
13173         return 0;
13174 }
13175
13176 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13177 {
13178         struct net_device *dev;
13179         struct bnxt *bp;
13180         int rc, max_irqs;
13181
13182         if (pci_is_bridge(pdev))
13183                 return -ENODEV;
13184
13185         /* Clear any DMA transactions still pending from the crashed
13186          * kernel while loading the driver in the kdump capture kernel.
13187          */
13188         if (is_kdump_kernel()) {
13189                 pci_clear_master(pdev);
13190                 pcie_flr(pdev);
13191         }
13192
13193         max_irqs = bnxt_get_max_irq(pdev);
13194         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13195         if (!dev)
13196                 return -ENOMEM;
13197
13198         bp = netdev_priv(dev);
13199         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13200         bnxt_set_max_func_irqs(bp, max_irqs);
13201
13202         if (bnxt_vf_pciid(ent->driver_data))
13203                 bp->flags |= BNXT_FLAG_VF;
13204
13205         if (pdev->msix_cap)
13206                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13207
13208         rc = bnxt_init_board(pdev, dev);
13209         if (rc < 0)
13210                 goto init_err_free;
13211
13212         dev->netdev_ops = &bnxt_netdev_ops;
13213         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13214         dev->ethtool_ops = &bnxt_ethtool_ops;
13215         pci_set_drvdata(pdev, dev);
13216
13217         rc = bnxt_alloc_hwrm_resources(bp);
13218         if (rc)
13219                 goto init_err_pci_clean;
13220
13221         mutex_init(&bp->hwrm_cmd_lock);
13222         mutex_init(&bp->link_lock);
13223
13224         rc = bnxt_fw_init_one_p1(bp);
13225         if (rc)
13226                 goto init_err_pci_clean;
13227
13228         if (BNXT_PF(bp))
13229                 bnxt_vpd_read_info(bp);
13230
13231         if (BNXT_CHIP_P5(bp)) {
13232                 bp->flags |= BNXT_FLAG_CHIP_P5;
13233                 if (BNXT_CHIP_SR2(bp))
13234                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13235         }
13236
13237         rc = bnxt_alloc_rss_indir_tbl(bp);
13238         if (rc)
13239                 goto init_err_pci_clean;
13240
13241         rc = bnxt_fw_init_one_p2(bp);
13242         if (rc)
13243                 goto init_err_pci_clean;
13244
13245         rc = bnxt_map_db_bar(bp);
13246         if (rc) {
13247                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13248                         rc);
13249                 goto init_err_pci_clean;
13250         }
13251
13252         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13253                            NETIF_F_TSO | NETIF_F_TSO6 |
13254                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13255                            NETIF_F_GSO_IPXIP4 |
13256                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13257                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13258                            NETIF_F_RXCSUM | NETIF_F_GRO;
13259
13260         if (BNXT_SUPPORTS_TPA(bp))
13261                 dev->hw_features |= NETIF_F_LRO;
13262
13263         dev->hw_enc_features =
13264                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13265                         NETIF_F_TSO | NETIF_F_TSO6 |
13266                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13267                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13268                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13269         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13270
13271         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13272                                     NETIF_F_GSO_GRE_CSUM;
13273         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13274         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13275                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13276         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13277                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13278         if (BNXT_SUPPORTS_TPA(bp))
13279                 dev->hw_features |= NETIF_F_GRO_HW;
13280         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13281         if (dev->features & NETIF_F_GRO_HW)
13282                 dev->features &= ~NETIF_F_LRO;
13283         dev->priv_flags |= IFF_UNICAST_FLT;
13284
13285 #ifdef CONFIG_BNXT_SRIOV
13286         init_waitqueue_head(&bp->sriov_cfg_wait);
13287         mutex_init(&bp->sriov_lock);
13288 #endif
13289         if (BNXT_SUPPORTS_TPA(bp)) {
13290                 bp->gro_func = bnxt_gro_func_5730x;
13291                 if (BNXT_CHIP_P4(bp))
13292                         bp->gro_func = bnxt_gro_func_5731x;
13293                 else if (BNXT_CHIP_P5(bp))
13294                         bp->gro_func = bnxt_gro_func_5750x;
13295         }
13296         if (!BNXT_CHIP_P4_PLUS(bp))
13297                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13298
13299         rc = bnxt_init_mac_addr(bp);
13300         if (rc) {
13301                 dev_err(&pdev->dev, "Unable to initialize MAC address.\n");
13302                 rc = -EADDRNOTAVAIL;
13303                 goto init_err_pci_clean;
13304         }
13305
13306         if (BNXT_PF(bp)) {
13307                 /* Read the adapter's DSN to use as the eswitch switch_id */
13308                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13309         }
13310
13311         /* MTU range: 60 - FW defined max */
13312         dev->min_mtu = ETH_ZLEN;
13313         dev->max_mtu = bp->max_mtu;
13314
13315         rc = bnxt_probe_phy(bp, true);
13316         if (rc)
13317                 goto init_err_pci_clean;
13318
13319         bnxt_set_rx_skb_mode(bp, false);
13320         bnxt_set_tpa_flags(bp);
13321         bnxt_set_ring_params(bp);
13322         rc = bnxt_set_dflt_rings(bp, true);
13323         if (rc) {
13324                 netdev_err(bp->dev, "Not enough rings available.\n");
13325                 rc = -ENOMEM;
13326                 goto init_err_pci_clean;
13327         }
13328
13329         bnxt_fw_init_one_p3(bp);
13330
13331         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13332                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13333
13334         rc = bnxt_init_int_mode(bp);
13335         if (rc)
13336                 goto init_err_pci_clean;
13337
13338         /* No TC has been set yet and rings may have been trimmed due to
13339          * limited MSIX, so we re-initialize the TX rings per TC.
13340          */
13341         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13342
13343         if (BNXT_PF(bp)) {
13344                 if (!bnxt_pf_wq) {
13345                         bnxt_pf_wq =
13346                                 create_singlethread_workqueue("bnxt_pf_wq");
13347                         if (!bnxt_pf_wq) {
13348                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13349                                 rc = -ENOMEM;
13350                                 goto init_err_pci_clean;
13351                         }
13352                 }
13353                 rc = bnxt_init_tc(bp);
13354                 if (rc)
13355                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13356                                    rc);
13357         }
13358
13359         bnxt_inv_fw_health_reg(bp);
13360         bnxt_dl_register(bp);
13361
13362         rc = register_netdev(dev);
13363         if (rc)
13364                 goto init_err_cleanup;
13365
13366         if (BNXT_PF(bp))
13367                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13368         bnxt_dl_fw_reporters_create(bp);
13369
13370         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13371                     board_info[ent->driver_data].name,
13372                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
13373         pcie_print_link_status(pdev);
13374
13375         pci_save_state(pdev);
13376         return 0;
13377
13378 init_err_cleanup:
13379         bnxt_dl_unregister(bp);
13380         bnxt_shutdown_tc(bp);
13381         bnxt_clear_int_mode(bp);
13382
13383 init_err_pci_clean:
13384         bnxt_hwrm_func_drv_unrgtr(bp);
13385         bnxt_free_hwrm_resources(bp);
13386         bnxt_ethtool_free(bp);
13387         bnxt_ptp_clear(bp);
13388         kfree(bp->ptp_cfg);
13389         bp->ptp_cfg = NULL;
13390         kfree(bp->fw_health);
13391         bp->fw_health = NULL;
13392         bnxt_cleanup_pci(bp);
13393         bnxt_free_ctx_mem(bp);
13394         kfree(bp->ctx);
13395         bp->ctx = NULL;
13396         kfree(bp->rss_indir_tbl);
13397         bp->rss_indir_tbl = NULL;
13398
13399 init_err_free:
13400         free_netdev(dev);
13401         return rc;
13402 }
13403
13404 static void bnxt_shutdown(struct pci_dev *pdev)
13405 {
13406         struct net_device *dev = pci_get_drvdata(pdev);
13407         struct bnxt *bp;
13408
13409         if (!dev)
13410                 return;
13411
13412         rtnl_lock();
13413         bp = netdev_priv(dev);
13414         if (!bp)
13415                 goto shutdown_exit;
13416
13417         if (netif_running(dev))
13418                 dev_close(dev);
13419
13420         bnxt_ulp_shutdown(bp);
13421         bnxt_clear_int_mode(bp);
13422         pci_disable_device(pdev);
13423
13424         if (system_state == SYSTEM_POWER_OFF) {
13425                 pci_wake_from_d3(pdev, bp->wol);
13426                 pci_set_power_state(pdev, PCI_D3hot);
13427         }
13428
13429 shutdown_exit:
13430         rtnl_unlock();
13431 }
13432
13433 #ifdef CONFIG_PM_SLEEP
13434 static int bnxt_suspend(struct device *device)
13435 {
13436         struct net_device *dev = dev_get_drvdata(device);
13437         struct bnxt *bp = netdev_priv(dev);
13438         int rc = 0;
13439
13440         rtnl_lock();
13441         bnxt_ulp_stop(bp);
13442         if (netif_running(dev)) {
13443                 netif_device_detach(dev);
13444                 rc = bnxt_close(dev);
13445         }
13446         bnxt_hwrm_func_drv_unrgtr(bp);
13447         pci_disable_device(bp->pdev);
13448         bnxt_free_ctx_mem(bp);
13449         kfree(bp->ctx);
13450         bp->ctx = NULL;
13451         rtnl_unlock();
13452         return rc;
13453 }
13454
13455 static int bnxt_resume(struct device *device)
13456 {
13457         struct net_device *dev = dev_get_drvdata(device);
13458         struct bnxt *bp = netdev_priv(dev);
13459         int rc = 0;
13460
13461         rtnl_lock();
13462         rc = pci_enable_device(bp->pdev);
13463         if (rc) {
13464                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13465                            rc);
13466                 goto resume_exit;
13467         }
13468         pci_set_master(bp->pdev);
13469         if (bnxt_hwrm_ver_get(bp)) {
13470                 rc = -ENODEV;
13471                 goto resume_exit;
13472         }
13473         rc = bnxt_hwrm_func_reset(bp);
13474         if (rc) {
13475                 rc = -EBUSY;
13476                 goto resume_exit;
13477         }
13478
13479         rc = bnxt_hwrm_func_qcaps(bp);
13480         if (rc)
13481                 goto resume_exit;
13482
13483         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13484                 rc = -ENODEV;
13485                 goto resume_exit;
13486         }
13487
13488         bnxt_get_wol_settings(bp);
13489         if (netif_running(dev)) {
13490                 rc = bnxt_open(dev);
13491                 if (!rc)
13492                         netif_device_attach(dev);
13493         }
13494
13495 resume_exit:
13496         bnxt_ulp_start(bp, rc);
13497         if (!rc)
13498                 bnxt_reenable_sriov(bp);
13499         rtnl_unlock();
13500         return rc;
13501 }
13502
13503 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13504 #define BNXT_PM_OPS (&bnxt_pm_ops)
13505
13506 #else
13507
13508 #define BNXT_PM_OPS NULL
13509
13510 #endif /* CONFIG_PM_SLEEP */
13511
13512 /**
13513  * bnxt_io_error_detected - called when PCI error is detected
13514  * @pdev: Pointer to PCI device
13515  * @state: The current pci connection state
13516  *
13517  * This function is called after a PCI bus error affecting
13518  * this device has been detected.
13519  */
13520 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13521                                                pci_channel_state_t state)
13522 {
13523         struct net_device *netdev = pci_get_drvdata(pdev);
13524         struct bnxt *bp = netdev_priv(netdev);
13525
13526         netdev_info(netdev, "PCI I/O error detected\n");
13527
13528         rtnl_lock();
13529         netif_device_detach(netdev);
13530
13531         bnxt_ulp_stop(bp);
13532
13533         if (state == pci_channel_io_perm_failure) {
13534                 rtnl_unlock();
13535                 return PCI_ERS_RESULT_DISCONNECT;
13536         }
13537
13538         if (state == pci_channel_io_frozen)
13539                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13540
13541         if (netif_running(netdev))
13542                 bnxt_close(netdev);
13543
13544         if (pci_is_enabled(pdev))
13545                 pci_disable_device(pdev);
13546         bnxt_free_ctx_mem(bp);
13547         kfree(bp->ctx);
13548         bp->ctx = NULL;
13549         rtnl_unlock();
13550
13551         /* Request a slot reset. */
13552         return PCI_ERS_RESULT_NEED_RESET;
13553 }
13554
13555 /**
13556  * bnxt_io_slot_reset - called after the pci bus has been reset.
13557  * @pdev: Pointer to PCI device
13558  *
13559  * Restart the card from scratch, as if from a cold-boot.
13560  * At this point, the card has experienced a hard reset,
13561  * followed by fixups by BIOS, and has its config space
13562  * set up identically to what it was at cold boot.
13563  */
13564 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13565 {
13566         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13567         struct net_device *netdev = pci_get_drvdata(pdev);
13568         struct bnxt *bp = netdev_priv(netdev);
13569         int err = 0, off;
13570
13571         netdev_info(bp->dev, "PCI Slot Reset\n");
13572
13573         rtnl_lock();
13574
13575         if (pci_enable_device(pdev)) {
13576                 dev_err(&pdev->dev,
13577                         "Cannot re-enable PCI device after reset.\n");
13578         } else {
13579                 pci_set_master(pdev);
13580                 /* Upon fatal error, the device-internal logic that
13581                  * latches the BAR values is reset, and the values are
13582                  * restored only by rewriting the BARs.
13583                  *
13584                  * pci_restore_state() does not re-write a BAR whose
13585                  * current value matches the saved value, so the driver
13586                  * writes the BARs to 0 first to force a full restore.
13587                  */
13588                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13589                                        &bp->state)) {
13590                         for (off = PCI_BASE_ADDRESS_0;
13591                              off <= PCI_BASE_ADDRESS_5; off += 4)
13592                                 pci_write_config_dword(bp->pdev, off, 0);
13593                 }
13594                 pci_restore_state(pdev);
13595                 pci_save_state(pdev);
13596
13597                 err = bnxt_hwrm_func_reset(bp);
13598                 if (!err)
13599                         result = PCI_ERS_RESULT_RECOVERED;
13600         }
13601
13602         rtnl_unlock();
13603
13604         return result;
13605 }
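/* Sketch of the restore-forcing trick above (illustrative): zeroing a
 * BAR guarantees that the saved and current values differ, so
 * pci_restore_state() cannot skip the write:
 *
 *	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, 0);
 *	pci_restore_state(pdev);	// mismatch -> BAR0 re-written
 */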
13606
13607 /**
13608  * bnxt_io_resume - called when traffic can start flowing again.
13609  * @pdev: Pointer to PCI device
13610  *
13611  * This callback is called when the error recovery driver tells
13612  * us that it's OK to resume normal operation.
13613  */
13614 static void bnxt_io_resume(struct pci_dev *pdev)
13615 {
13616         struct net_device *netdev = pci_get_drvdata(pdev);
13617         struct bnxt *bp = netdev_priv(netdev);
13618         int err;
13619
13620         netdev_info(bp->dev, "PCI Slot Resume\n");
13621         rtnl_lock();
13622
13623         err = bnxt_hwrm_func_qcaps(bp);
13624         if (!err && netif_running(netdev))
13625                 err = bnxt_open(netdev);
13626
13627         bnxt_ulp_start(bp, err);
13628         if (!err) {
13629                 bnxt_reenable_sriov(bp);
13630                 netif_device_attach(netdev);
13631         }
13632
13633         rtnl_unlock();
13634 }
13635
13636 static const struct pci_error_handlers bnxt_err_handler = {
13637         .error_detected = bnxt_io_error_detected,
13638         .slot_reset     = bnxt_io_slot_reset,
13639         .resume         = bnxt_io_resume
13640 };
13641
13642 static struct pci_driver bnxt_pci_driver = {
13643         .name           = DRV_MODULE_NAME,
13644         .id_table       = bnxt_pci_tbl,
13645         .probe          = bnxt_init_one,
13646         .remove         = bnxt_remove_one,
13647         .shutdown       = bnxt_shutdown,
13648         .driver.pm      = BNXT_PM_OPS,
13649         .err_handler    = &bnxt_err_handler,
13650 #if defined(CONFIG_BNXT_SRIOV)
13651         .sriov_configure = bnxt_sriov_configure,
13652 #endif
13653 };
13654
13655 static int __init bnxt_init(void)
13656 {
13657         bnxt_debug_init();
13658         return pci_register_driver(&bnxt_pci_driver);
13659 }
13660
13661 static void __exit bnxt_exit(void)
13662 {
13663         pci_unregister_driver(&bnxt_pci_driver);
13664         if (bnxt_pf_wq)
13665                 destroy_workqueue(bnxt_pf_wq);
13666         bnxt_debug_exit();
13667 }
13668
13669 module_init(bnxt_init);
13670 module_exit(bnxt_exit);