/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
                                 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_C_VF_HV,
        NETXTREME_E_VF_HV,
        NETXTREME_E_P5_VF,
        NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
        [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
        ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
                idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
                idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

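/* Doorbell helpers.  Legacy chips take a 32-bit write of the DB_KEY/
 * DB_IDX flags OR'ed with the completion ring index; P5 chips take a
 * 64-bit write built from the per-ring db_key64 plus a DBR_TYPE_*
 * opcode.  The bnxt_db_*() wrappers below choose the format based on
 * BNXT_FLAG_CHIP_P5.
 */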
#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

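/* TX length hint lookup table, indexed by packet length in 512-byte
 * units (see the "length >>= 9" in bnxt_start_xmit()).  The hint is
 * written into the TX BD flags, presumably so the hardware can size
 * its DMA reads appropriately.
 */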
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

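/* Return the CFA action for this skb from its switchdev metadata dst
 * (METADATA_HW_PORT_MUX), as used when transmitting on behalf of a VF
 * representor; 0 means no special action.
 */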
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                             u16 prod)
{
        bnxt_db_write(bp, &txr->tx_db, prod);
        txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
                                          struct bnxt_tx_ring_info *txr,
                                          struct netdev_queue *txq)
{
        netif_tx_stop_queue(txq);

        /* netif_tx_stop_queue() must be done before checking
         * tx index in bnxt_tx_avail() below, because in
         * bnxt_tx_int(), we update tx index before checking for
         * netif_tx_queue_stopped().
         */
        smp_mb();
        if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
                netif_tx_wake_queue(txq);
                return false;
        }

        return true;
}

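/* Main transmit routine.  A small packet (length <= tx_push_thresh) on
 * an otherwise empty ring is written inline through the push doorbell;
 * everything else is DMA-mapped into a long TX BD plus an extended BD
 * and one BD per fragment.  The doorbell kick is deferred while
 * netdev_xmit_more() is set, with kick_pending recording the deferral.
 */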
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                atomic_long_inc(&dev->tx_dropped);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                /* We must have raced with NAPI cleanup */
                if (net_ratelimit() && txr->kick_pending)
                        netif_warn(bp, tx_err, dev,
                                   "bnxt: ring busy w/ flush pending!\n");
                if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
                        return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
                        if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
                                            &ptp->tx_hdr_off)) {
                                if (vlan_tag_flags)
                                        ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
                                atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }
        }

        if (unlikely(skb->no_fcs))
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
            !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad))
                        /* SKB already freed. */
                        goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                goto tx_free;

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        skb_tx_timestamp(skb);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_txr_db_kick(bp, txr, prod);
        else
                txr->kick_pending = 1;

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_txr_db_kick(bp, txr, prod);

                bnxt_txr_netif_try_stop_queue(bp, txr, txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        if (BNXT_TX_PTP_IS_SET(lflags))
                atomic_inc(&bp->ptp_cfg->tx_avail);

        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), DMA_TO_DEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);
        }

tx_free:
        dev_kfree_skb_any(skb);
tx_kick_pending:
        if (txr->kick_pending)
                bnxt_txr_db_kick(bp, txr, txr->tx_prod);
        txr->tx_buf_ring[txr->tx_prod].skb = NULL;
        atomic_long_inc(&dev->tx_dropped);
        return NETDEV_TX_OK;
}

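/* Reclaim up to nr_pkts completed TX packets: unmap their buffers and
 * free the skbs (freeing is deferred for P5 PTP timestamp completions),
 * then re-wake the queue if it was stopped.  The smp_mb() below pairs
 * with the barrier in bnxt_txr_netif_try_stop_queue().
 */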
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                bool compl_deferred = false;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), DMA_TO_DEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                DMA_TO_DEVICE);
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                                if (!bnxt_get_tx_ts_p5(bp, skb))
                                        compl_deferred = true;
                                else
                                        atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                if (!compl_deferred)
                        dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
            READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
                netif_tx_wake_queue(txq);
}

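/* Allocate and map a full page from the ring's page_pool for page-mode
 * (XDP) RX.  The returned mapping is advanced by rx_dma_offset so that
 * it points at the data area rather than the start of the page.
 */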
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

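/* Fill one RX ring slot at @prod: a page in page mode (XDP), otherwise
 * a kmalloc'ed data buffer, and publish its DMA address in the BD.
 */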
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

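/* Recycle the buffer at @cons back into the producer slot, keeping its
 * existing DMA mapping.  Used when the packet was copied or dropped and
 * the original buffer can go straight back to the ring.
 */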
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

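/* Recycle @agg_bufs aggregation buffers (e.g. after an error or an
 * allocation failure), moving each consumed page back to a free
 * producer slot.  The agg completions are read from the TPA array on
 * P5 chips and from the completion ring otherwise.
 */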
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

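/* Page-mode receive: replenish the ring slot, then build an skb with
 * the page attached as frag 0 and the leading "payload" bytes (the
 * packet headers) copied into the linear area.
 */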
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
        page_pool_release_page(rxr->page_pool, page);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

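/* Normal receive: replenish the ring slot, then unmap the data buffer
 * and wrap it in an skb with build_skb(), avoiding any copy.
 */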
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

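/* Attach @agg_bufs aggregation pages to @skb as page frags, replenishing
 * the agg ring as we go.  On an allocation failure the skb is dropped
 * and the remaining buffers are recycled.
 */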
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

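/* Return true if all @agg_bufs aggregation completions beyond *raw_cons
 * have been written back by hardware, i.e. the valid bit of the last
 * one matches the current pass over the completion ring.
 */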
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

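/* Copy a small packet (up to rx_copy_thresh bytes) into a freshly
 * allocated skb so that the original ring buffer can be reused in place.
 */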
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

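/* Advance *raw_cons past a completion that is being discarded, including
 * any aggregation completions belonging to it.  Returns -EBUSY if those
 * completions have not all arrived yet.
 */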
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

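/* Work scheduling helpers: PFs queue onto the driver's dedicated
 * bnxt_pf_wq workqueue, VFs onto the system workqueue.  The FW reset
 * variant is a no-op unless a FW reset is actually in progress.
 */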
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

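/* Schedule a reset from NAPI context after an RX ring inconsistency:
 * a ring reset on legacy chips, the full reset task on P5 chips.
 * rx_next_cons is set to the invalid value 0xffff in the meantime.
 */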
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

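/* Handle a TPA (hardware GRO/LRO) start completion: park the current RX
 * buffer in tpa_info for the duration of the aggregation, give the ring
 * a replacement buffer, and record the hash, metadata and header info
 * needed when the TPA end completion arrives.
 */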
1284 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1285                            struct rx_tpa_start_cmp *tpa_start,
1286                            struct rx_tpa_start_cmp_ext *tpa_start1)
1287 {
1288         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1289         struct bnxt_tpa_info *tpa_info;
1290         u16 cons, prod, agg_id;
1291         struct rx_bd *prod_bd;
1292         dma_addr_t mapping;
1293
1294         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1295                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1296                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1297         } else {
1298                 agg_id = TPA_START_AGG_ID(tpa_start);
1299         }
1300         cons = tpa_start->rx_tpa_start_cmp_opaque;
1301         prod = rxr->rx_prod;
1302         cons_rx_buf = &rxr->rx_buf_ring[cons];
1303         prod_rx_buf = &rxr->rx_buf_ring[prod];
1304         tpa_info = &rxr->rx_tpa[agg_id];
1305
1306         if (unlikely(cons != rxr->rx_next_cons ||
1307                      TPA_START_ERROR(tpa_start))) {
1308                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1309                             cons, rxr->rx_next_cons,
1310                             TPA_START_ERROR_CODE(tpa_start1));
1311                 bnxt_sched_reset(bp, rxr);
1312                 return;
1313         }
1314         /* Store cfa_code in tpa_info to use in tpa_end
1315          * completion processing.
1316          */
1317         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1318         prod_rx_buf->data = tpa_info->data;
1319         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1320
1321         mapping = tpa_info->mapping;
1322         prod_rx_buf->mapping = mapping;
1323
1324         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1325
1326         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1327
1328         tpa_info->data = cons_rx_buf->data;
1329         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1330         cons_rx_buf->data = NULL;
1331         tpa_info->mapping = cons_rx_buf->mapping;
1332
1333         tpa_info->len =
1334                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1335                                 RX_TPA_START_CMP_LEN_SHIFT;
1336         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1337                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1338
1339                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1340                 tpa_info->gso_type = SKB_GSO_TCPV4;
1341                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1342                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1343                         tpa_info->gso_type = SKB_GSO_TCPV6;
1344                 tpa_info->rss_hash =
1345                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1346         } else {
1347                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1348                 tpa_info->gso_type = 0;
1349                 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1350         }
1351         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1352         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1353         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1354         tpa_info->agg_count = 0;
1355
1356         rxr->rx_prod = NEXT_RX(prod);
1357         cons = NEXT_RX(cons);
1358         rxr->rx_next_cons = NEXT_RX(cons);
1359         cons_rx_buf = &rxr->rx_buf_ring[cons];
1360
1361         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1362         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1363         cons_rx_buf->data = NULL;
1364 }
1365
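/* Abort an in-progress TPA completion and recycle any aggregation buffers
 * already posted for it so they are not leaked.
 */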
1366 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1367 {
1368         if (agg_bufs)
1369                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1370 }
1371
1372 #ifdef CONFIG_INET
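/* Set the UDP tunnel GSO type on an skb whose outer headers start at
 * skb->data.  If the encapsulating UDP header carries a checksum, the
 * _CSUM variant is used.  Note the IPv6 case assumes no extension headers
 * precede the UDP header.
 */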
1373 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1374 {
1375         struct udphdr *uh = NULL;
1376
1377         if (ip_proto == htons(ETH_P_IP)) {
1378                 struct iphdr *iph = (struct iphdr *)skb->data;
1379
1380                 if (iph->protocol == IPPROTO_UDP)
1381                         uh = (struct udphdr *)(iph + 1);
1382         } else {
1383                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1384
1385                 if (iph->nexthdr == IPPROTO_UDP)
1386                         uh = (struct udphdr *)(iph + 1);
1387         }
1388         if (uh) {
1389                 if (uh->check)
1390                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1391                 else
1392                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1393         }
1394 }
1395 #endif
1396
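/* GRO fix-up for BCM5731X chips.  The hardware supplies the inner/outer
 * header offsets in hdr_info (adjusted by 4 bytes for internal loopback
 * packets), so the headers can be located directly; the TCP pseudo-header
 * checksum is then recomputed for tcp_gro_complete().
 */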
1397 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1398                                            int payload_off, int tcp_ts,
1399                                            struct sk_buff *skb)
1400 {
1401 #ifdef CONFIG_INET
1402         struct tcphdr *th;
1403         int len, nw_off;
1404         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1405         u32 hdr_info = tpa_info->hdr_info;
1406         bool loopback = false;
1407
1408         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1409         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1410         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1411
1412         /* If the packet is an internal loopback packet, the offsets will
1413          * have an extra 4 bytes.
1414          */
1415         if (inner_mac_off == 4) {
1416                 loopback = true;
1417         } else if (inner_mac_off > 4) {
1418                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1419                                             ETH_HLEN - 2));
1420
1421         /* We only support inner IPv4/IPv6.  If we don't see the
1422                  * correct protocol ID, it must be a loopback packet where
1423                  * the offsets are off by 4.
1424                  */
1425                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1426                         loopback = true;
1427         }
1428         if (loopback) {
1429                 /* internal loopback packet, subtract 4 from all offsets */
1430                 inner_ip_off -= 4;
1431                 inner_mac_off -= 4;
1432                 outer_ip_off -= 4;
1433         }
1434
1435         nw_off = inner_ip_off - ETH_HLEN;
1436         skb_set_network_header(skb, nw_off);
1437         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1438                 struct ipv6hdr *iph = ipv6_hdr(skb);
1439
1440                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1441                 len = skb->len - skb_transport_offset(skb);
1442                 th = tcp_hdr(skb);
1443                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1444         } else {
1445                 struct iphdr *iph = ip_hdr(skb);
1446
1447                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1448                 len = skb->len - skb_transport_offset(skb);
1449                 th = tcp_hdr(skb);
1450                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1451         }
1452
1453         if (inner_mac_off) { /* tunnel */
1454                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1455                                             ETH_HLEN - 2));
1456
1457                 bnxt_gro_tunnel(skb, proto);
1458         }
1459 #endif
1460         return skb;
1461 }
1462
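/* GRO fix-up for BCM5750X (P5) chips.  Header offsets come from hdr_info as
 * above, but no TCP pseudo-header checksum fix-up is needed on these chips,
 * so only the network and transport header offsets are set.
 */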
1463 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1464                                            int payload_off, int tcp_ts,
1465                                            struct sk_buff *skb)
1466 {
1467 #ifdef CONFIG_INET
1468         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1469         u32 hdr_info = tpa_info->hdr_info;
1470         int iphdr_len, nw_off;
1471
1472         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1473         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1474         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1475
1476         nw_off = inner_ip_off - ETH_HLEN;
1477         skb_set_network_header(skb, nw_off);
1478         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1479                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1480         skb_set_transport_header(skb, nw_off + iphdr_len);
1481
1482         if (inner_mac_off) { /* tunnel */
1483                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1484                                             ETH_HLEN - 2));
1485
1486                 bnxt_gro_tunnel(skb, proto);
1487         }
1488 #endif
1489         return skb;
1490 }
1491
1492 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1493 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1494
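/* GRO fix-up for BCM5730X chips.  These chips do not report header offsets,
 * so the headers are located by walking back from payload_off using the
 * fixed IPv4/IPv6 + TCP header sizes, plus 12 bytes of TCP options when
 * timestamps are present.
 */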
1495 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1496                                            int payload_off, int tcp_ts,
1497                                            struct sk_buff *skb)
1498 {
1499 #ifdef CONFIG_INET
1500         struct tcphdr *th;
1501         int len, nw_off, tcp_opt_len = 0;
1502
1503         if (tcp_ts)
1504                 tcp_opt_len = 12;
1505
1506         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1507                 struct iphdr *iph;
1508
1509                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1510                          ETH_HLEN;
1511                 skb_set_network_header(skb, nw_off);
1512                 iph = ip_hdr(skb);
1513                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1514                 len = skb->len - skb_transport_offset(skb);
1515                 th = tcp_hdr(skb);
1516                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1517         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1518                 struct ipv6hdr *iph;
1519
1520                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1521                          ETH_HLEN;
1522                 skb_set_network_header(skb, nw_off);
1523                 iph = ipv6_hdr(skb);
1524                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1525                 len = skb->len - skb_transport_offset(skb);
1526                 th = tcp_hdr(skb);
1527                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1528         } else {
1529                 dev_kfree_skb_any(skb);
1530                 return NULL;
1531         }
1532
1533         if (nw_off) /* tunnel */
1534                 bnxt_gro_tunnel(skb, skb->protocol);
1535 #endif
1536         return skb;
1537 }
1538
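/* Finish GRO processing of an aggregated packet: set the segment count and
 * gso_size/gso_type from the TPA end completion, run the chip-specific
 * header fix-up (bp->gro_func), then complete the skb with
 * tcp_gro_complete().
 */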
1539 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1540                                            struct bnxt_tpa_info *tpa_info,
1541                                            struct rx_tpa_end_cmp *tpa_end,
1542                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1543                                            struct sk_buff *skb)
1544 {
1545 #ifdef CONFIG_INET
1546         int payload_off;
1547         u16 segs;
1548
1549         segs = TPA_END_TPA_SEGS(tpa_end);
1550         if (segs == 1)
1551                 return skb;
1552
1553         NAPI_GRO_CB(skb)->count = segs;
1554         skb_shinfo(skb)->gso_size =
1555                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1556         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1557         if (bp->flags & BNXT_FLAG_CHIP_P5)
1558                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1559         else
1560                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1561         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1562         if (likely(skb))
1563                 tcp_gro_complete(skb);
1564 #endif
1565         return skb;
1566 }
1567
1568 /* Given the cfa_code of a received packet, determine which
1569  * netdev (vf-rep or PF) the packet is destined for.
1570  */
1571 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1572 {
1573         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1574
1575         /* if vf-rep dev is NULL, the packet must belong to the PF */
1576         return dev ? dev : bp->dev;
1577 }
1578
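/* Complete a TPA aggregation.  The skb is built from the buffer saved at
 * TPA start (copied if below the copy threshold, otherwise the buffer is
 * taken over and a replacement is allocated), the aggregation pages are
 * attached, and RSS hash, VLAN and checksum results are applied before
 * optional GRO processing.  Returns NULL if the packet is dropped, or an
 * ERR_PTR if the completion ring does not yet contain all the entries.
 */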
1579 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1580                                            struct bnxt_cp_ring_info *cpr,
1581                                            u32 *raw_cons,
1582                                            struct rx_tpa_end_cmp *tpa_end,
1583                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1584                                            u8 *event)
1585 {
1586         struct bnxt_napi *bnapi = cpr->bnapi;
1587         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1588         u8 *data_ptr, agg_bufs;
1589         unsigned int len;
1590         struct bnxt_tpa_info *tpa_info;
1591         dma_addr_t mapping;
1592         struct sk_buff *skb;
1593         u16 idx = 0, agg_id;
1594         void *data;
1595         bool gro;
1596
1597         if (unlikely(bnapi->in_reset)) {
1598                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1599
1600                 if (rc < 0)
1601                         return ERR_PTR(-EBUSY);
1602                 return NULL;
1603         }
1604
1605         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1606                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1607                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1608                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1609                 tpa_info = &rxr->rx_tpa[agg_id];
1610                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1611                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1612                                     agg_bufs, tpa_info->agg_count);
1613                         agg_bufs = tpa_info->agg_count;
1614                 }
1615                 tpa_info->agg_count = 0;
1616                 *event |= BNXT_AGG_EVENT;
1617                 bnxt_free_agg_idx(rxr, agg_id);
1618                 idx = agg_id;
1619                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1620         } else {
1621                 agg_id = TPA_END_AGG_ID(tpa_end);
1622                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1623                 tpa_info = &rxr->rx_tpa[agg_id];
1624                 idx = RING_CMP(*raw_cons);
1625                 if (agg_bufs) {
1626                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1627                                 return ERR_PTR(-EBUSY);
1628
1629                         *event |= BNXT_AGG_EVENT;
1630                         idx = NEXT_CMP(idx);
1631                 }
1632                 gro = !!TPA_END_GRO(tpa_end);
1633         }
1634         data = tpa_info->data;
1635         data_ptr = tpa_info->data_ptr;
1636         prefetch(data_ptr);
1637         len = tpa_info->len;
1638         mapping = tpa_info->mapping;
1639
1640         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1641                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1642                 if (agg_bufs > MAX_SKB_FRAGS)
1643                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1644                                     agg_bufs, (int)MAX_SKB_FRAGS);
1645                 return NULL;
1646         }
1647
1648         if (len <= bp->rx_copy_thresh) {
1649                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1650                 if (!skb) {
1651                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1652                         return NULL;
1653                 }
1654         } else {
1655                 u8 *new_data;
1656                 dma_addr_t new_mapping;
1657
1658                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1659                 if (!new_data) {
1660                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1661                         return NULL;
1662                 }
1663
1664                 tpa_info->data = new_data;
1665                 tpa_info->data_ptr = new_data + bp->rx_offset;
1666                 tpa_info->mapping = new_mapping;
1667
1668                 skb = build_skb(data, 0);
1669                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1670                                        bp->rx_buf_use_size, bp->rx_dir,
1671                                        DMA_ATTR_WEAK_ORDERING);
1672
1673                 if (!skb) {
1674                         kfree(data);
1675                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1676                         return NULL;
1677                 }
1678                 skb_reserve(skb, bp->rx_offset);
1679                 skb_put(skb, len);
1680         }
1681
1682         if (agg_bufs) {
1683                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1684                 if (!skb) {
1685                         /* Page reuse already handled by bnxt_rx_pages(). */
1686                         return NULL;
1687                 }
1688         }
1689
1690         skb->protocol =
1691                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1692
1693         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1694                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1695
1696         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1697             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1698                 __be16 vlan_proto = htons(tpa_info->metadata >>
1699                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1700                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1701
1702                 if (eth_type_vlan(vlan_proto)) {
1703                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1704                 } else {
1705                         dev_kfree_skb(skb);
1706                         return NULL;
1707                 }
1708         }
1709
1710         skb_checksum_none_assert(skb);
1711         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1712                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1713                 skb->csum_level =
1714                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1715         }
1716
1717         if (gro)
1718                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1719
1720         return skb;
1721 }
1722
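/* Buffer one aggregation completion against its open TPA session; the
 * entries in agg_arr are consumed when the TPA end completion arrives.
 */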
1723 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1724                          struct rx_agg_cmp *rx_agg)
1725 {
1726         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1727         struct bnxt_tpa_info *tpa_info;
1728
1729         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1730         tpa_info = &rxr->rx_tpa[agg_id];
1731         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1732         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1733 }
1734
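/* Hand a completed skb up the stack, either via the VF representor path or
 * the normal NAPI GRO receive path.
 */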
1735 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1736                              struct sk_buff *skb)
1737 {
1738         if (skb->dev != bp->dev) {
1739                 /* this packet belongs to a vf-rep */
1740                 bnxt_vf_rep_rx(bp, skb);
1741                 return;
1742         }
1743         skb_record_rx_queue(skb, bnapi->index);
1744         napi_gro_receive(&bnapi->napi, skb);
1745 }
1746
1747 /* returns the following:
1748  * 1       - 1 packet successfully received
1749  * 0       - successful TPA_START, packet not completed yet
1750  * -EBUSY  - completion ring does not have all the agg buffers yet
1751  * -ENOMEM - packet aborted due to out of memory
1752  * -EIO    - packet aborted due to hw error indicated in BD
1753  */
1754 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1755                        u32 *raw_cons, u8 *event)
1756 {
1757         struct bnxt_napi *bnapi = cpr->bnapi;
1758         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1759         struct net_device *dev = bp->dev;
1760         struct rx_cmp *rxcmp;
1761         struct rx_cmp_ext *rxcmp1;
1762         u32 tmp_raw_cons = *raw_cons;
1763         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1764         struct bnxt_sw_rx_bd *rx_buf;
1765         unsigned int len;
1766         u8 *data_ptr, agg_bufs, cmp_type;
1767         dma_addr_t dma_addr;
1768         struct sk_buff *skb;
1769         u32 flags, misc;
1770         void *data;
1771         int rc = 0;
1772
1773         rxcmp = (struct rx_cmp *)
1774                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1775
1776         cmp_type = RX_CMP_TYPE(rxcmp);
1777
1778         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1779                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1780                 goto next_rx_no_prod_no_len;
1781         }
1782
1783         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1784         cp_cons = RING_CMP(tmp_raw_cons);
1785         rxcmp1 = (struct rx_cmp_ext *)
1786                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1787
1788         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1789                 return -EBUSY;
1790
1791         /* The validity test of the entry must be done before
1792          * reading any further.
1793          */
1794         dma_rmb();
1795         prod = rxr->rx_prod;
1796
1797         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1798                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1799                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1800
1801                 *event |= BNXT_RX_EVENT;
1802                 goto next_rx_no_prod_no_len;
1803
1804         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1805                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1806                                    (struct rx_tpa_end_cmp *)rxcmp,
1807                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1808
1809                 if (IS_ERR(skb))
1810                         return -EBUSY;
1811
1812                 rc = -ENOMEM;
1813                 if (likely(skb)) {
1814                         bnxt_deliver_skb(bp, bnapi, skb);
1815                         rc = 1;
1816                 }
1817                 *event |= BNXT_RX_EVENT;
1818                 goto next_rx_no_prod_no_len;
1819         }
1820
1821         cons = rxcmp->rx_cmp_opaque;
1822         if (unlikely(cons != rxr->rx_next_cons)) {
1823                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1824
1825                 /* 0xffff is a forced error, don't print it */
1826                 if (rxr->rx_next_cons != 0xffff)
1827                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1828                                     cons, rxr->rx_next_cons);
1829                 bnxt_sched_reset(bp, rxr);
1830                 if (rc1)
1831                         return rc1;
1832                 goto next_rx_no_prod_no_len;
1833         }
1834         rx_buf = &rxr->rx_buf_ring[cons];
1835         data = rx_buf->data;
1836         data_ptr = rx_buf->data_ptr;
1837         prefetch(data_ptr);
1838
1839         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1840         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1841
1842         if (agg_bufs) {
1843                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1844                         return -EBUSY;
1845
1846                 cp_cons = NEXT_CMP(cp_cons);
1847                 *event |= BNXT_AGG_EVENT;
1848         }
1849         *event |= BNXT_RX_EVENT;
1850
1851         rx_buf->data = NULL;
1852         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1853                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1854
1855                 bnxt_reuse_rx_data(rxr, cons, data);
1856                 if (agg_bufs)
1857                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1858                                                false);
1859
1860                 rc = -EIO;
1861                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1862                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1863                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1864                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1865                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1866                                                  rx_err);
1867                                 bnxt_sched_reset(bp, rxr);
1868                         }
1869                 }
1870                 goto next_rx_no_len;
1871         }
1872
1873         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1874         len = flags >> RX_CMP_LEN_SHIFT;
1875         dma_addr = rx_buf->mapping;
1876
1877         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1878                 rc = 1;
1879                 goto next_rx;
1880         }
1881
1882         if (len <= bp->rx_copy_thresh) {
1883                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1884                 bnxt_reuse_rx_data(rxr, cons, data);
1885                 if (!skb) {
1886                         if (agg_bufs)
1887                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1888                                                        agg_bufs, false);
1889                         rc = -ENOMEM;
1890                         goto next_rx;
1891                 }
1892         } else {
1893                 u32 payload;
1894
1895                 if (rx_buf->data_ptr == data_ptr)
1896                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1897                 else
1898                         payload = 0;
1899                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1900                                       payload | len);
1901                 if (!skb) {
1902                         rc = -ENOMEM;
1903                         goto next_rx;
1904                 }
1905         }
1906
1907         if (agg_bufs) {
1908                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1909                 if (!skb) {
1910                         rc = -ENOMEM;
1911                         goto next_rx;
1912                 }
1913         }
1914
1915         if (RX_CMP_HASH_VALID(rxcmp)) {
1916                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1917                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1918
1919                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1920                 if (hash_type != 1 && hash_type != 3)
1921                         type = PKT_HASH_TYPE_L3;
1922                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1923         }
1924
1925         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1926         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1927
1928         if ((rxcmp1->rx_cmp_flags2 &
1929              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1930             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1931                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1932                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1933                 __be16 vlan_proto = htons(meta_data >>
1934                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1935
1936                 if (eth_type_vlan(vlan_proto)) {
1937                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1938                 } else {
1939                         dev_kfree_skb(skb);
1940                         goto next_rx;
1941                 }
1942         }
1943
1944         skb_checksum_none_assert(skb);
1945         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1946                 if (dev->features & NETIF_F_RXCSUM) {
1947                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1948                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1949                 }
1950         } else {
1951                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1952                         if (dev->features & NETIF_F_RXCSUM)
1953                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1954                 }
1955         }
1956
1957         if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1958                      RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1959                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1960                         u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1961                         u64 ns, ts;
1962
1963                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1964                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1965
1966                                 spin_lock_bh(&ptp->ptp_lock);
1967                                 ns = timecounter_cyc2time(&ptp->tc, ts);
1968                                 spin_unlock_bh(&ptp->ptp_lock);
1969                                 memset(skb_hwtstamps(skb), 0,
1970                                        sizeof(*skb_hwtstamps(skb)));
1971                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1972                         }
1973                 }
1974         }
1975         bnxt_deliver_skb(bp, bnapi, skb);
1976         rc = 1;
1977
1978 next_rx:
1979         cpr->rx_packets += 1;
1980         cpr->rx_bytes += len;
1981
1982 next_rx_no_len:
1983         rxr->rx_prod = NEXT_RX(prod);
1984         rxr->rx_next_cons = NEXT_RX(cons);
1985
1986 next_rx_no_prod_no_len:
1987         *raw_cons = tmp_raw_cons;
1988
1989         return rc;
1990 }
1991
1992 /* In netpoll mode, if we are using a combined completion ring, we need to
1993  * discard the rx packets and recycle the buffers.
1994  */
1995 static int bnxt_force_rx_discard(struct bnxt *bp,
1996                                  struct bnxt_cp_ring_info *cpr,
1997                                  u32 *raw_cons, u8 *event)
1998 {
1999         u32 tmp_raw_cons = *raw_cons;
2000         struct rx_cmp_ext *rxcmp1;
2001         struct rx_cmp *rxcmp;
2002         u16 cp_cons;
2003         u8 cmp_type;
2004
2005         cp_cons = RING_CMP(tmp_raw_cons);
2006         rxcmp = (struct rx_cmp *)
2007                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2008
2009         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2010         cp_cons = RING_CMP(tmp_raw_cons);
2011         rxcmp1 = (struct rx_cmp_ext *)
2012                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2013
2014         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2015                 return -EBUSY;
2016
2017         /* The validity test of the entry must be done before
2018          * reading any further.
2019          */
2020         dma_rmb();
2021         cmp_type = RX_CMP_TYPE(rxcmp);
2022         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2023                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2024                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2025         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2026                 struct rx_tpa_end_cmp_ext *tpa_end1;
2027
2028                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2029                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2030                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2031         }
2032         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2033 }
2034
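/* Read one firmware health register, dispatching on the register type
 * encoded in the register map: PCI config space, a GRC window, or a direct
 * BAR0/BAR1 offset.
 */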
2035 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2036 {
2037         struct bnxt_fw_health *fw_health = bp->fw_health;
2038         u32 reg = fw_health->regs[reg_idx];
2039         u32 reg_type, reg_off, val = 0;
2040
2041         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2042         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2043         switch (reg_type) {
2044         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2045                 pci_read_config_dword(bp->pdev, reg_off, &val);
2046                 break;
2047         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2048                 reg_off = fw_health->mapped_regs[reg_idx];
2049                 fallthrough;
2050         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2051                 val = readl(bp->bar0 + reg_off);
2052                 break;
2053         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2054                 val = readl(bp->bar1 + reg_off);
2055                 break;
2056         }
2057         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2058                 val &= fw_health->fw_reset_inprog_reg_mask;
2059         return val;
2060 }
2061
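/* Map a firmware aggregation ring id back to the driver ring group index;
 * returns INVALID_HW_RING_ID if no RX ring owns that agg ring.
 */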
2062 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2063 {
2064         int i;
2065
2066         for (i = 0; i < bp->rx_nr_rings; i++) {
2067                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2068                 struct bnxt_ring_grp_info *grp_info;
2069
2070                 grp_info = &bp->grp_info[grp_idx];
2071                 if (grp_info->agg_fw_ring_id == ring_id)
2072                         return grp_idx;
2073         }
2074         return INVALID_HW_RING_ID;
2075 }
2076
2077 #define BNXT_GET_EVENT_PORT(data)       \
2078         ((data) &                       \
2079          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2080
2081 #define BNXT_EVENT_RING_TYPE(data2)     \
2082         ((data2) &                      \
2083          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2084
2085 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2086         (BNXT_EVENT_RING_TYPE(data2) == \
2087          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2088
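/* Process one asynchronous event completion from the firmware.  Most events
 * only set a bit in bp->sp_event and defer the real work to the slow path
 * workqueue (bnxt_queue_sp_work()); reset and recovery events also update
 * the fw_health state.
 */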
2089 static int bnxt_async_event_process(struct bnxt *bp,
2090                                     struct hwrm_async_event_cmpl *cmpl)
2091 {
2092         u16 event_id = le16_to_cpu(cmpl->event_id);
2093         u32 data1 = le32_to_cpu(cmpl->event_data1);
2094         u32 data2 = le32_to_cpu(cmpl->event_data2);
2095
2096         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2097         switch (event_id) {
2098         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2099                 struct bnxt_link_info *link_info = &bp->link_info;
2100
2101                 if (BNXT_VF(bp))
2102                         goto async_event_process_exit;
2103
2104                 /* print unsupported speed warning in forced speed mode only */
2105                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2106                     (data1 & 0x20000)) {
2107                         u16 fw_speed = link_info->force_link_speed;
2108                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2109
2110                         if (speed != SPEED_UNKNOWN)
2111                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2112                                             speed);
2113                 }
2114                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2115         }
2116                 fallthrough;
2117         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2118         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2119                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2120                 fallthrough;
2121         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2122                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2123                 break;
2124         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2125                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2126                 break;
2127         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2128                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2129
2130                 if (BNXT_VF(bp))
2131                         break;
2132
2133                 if (bp->pf.port_id != port_id)
2134                         break;
2135
2136                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2137                 break;
2138         }
2139         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2140                 if (BNXT_PF(bp))
2141                         goto async_event_process_exit;
2142                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2143                 break;
2144         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2145                 char *fatal_str = "non-fatal";
2146
2147                 if (!bp->fw_health)
2148                         goto async_event_process_exit;
2149
2150                 bp->fw_reset_timestamp = jiffies;
2151                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2152                 if (!bp->fw_reset_min_dsecs)
2153                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2154                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2155                 if (!bp->fw_reset_max_dsecs)
2156                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2157                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2158                         fatal_str = "fatal";
2159                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2160                 }
2161                 netif_warn(bp, hw, bp->dev,
2162                            "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2163                            fatal_str, data1, data2,
2164                            bp->fw_reset_min_dsecs * 100,
2165                            bp->fw_reset_max_dsecs * 100);
2166                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2167                 break;
2168         }
2169         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2170                 struct bnxt_fw_health *fw_health = bp->fw_health;
2171
2172                 if (!fw_health)
2173                         goto async_event_process_exit;
2174
2175                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2176                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2177                 if (!fw_health->enabled) {
2178                         netif_info(bp, drv, bp->dev,
2179                                    "Error recovery info: error recovery[0]\n");
2180                         break;
2181                 }
2182                 fw_health->tmr_multiplier =
2183                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2184                                      bp->current_interval * 10);
2185                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2186                 fw_health->last_fw_heartbeat =
2187                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2188                 fw_health->last_fw_reset_cnt =
2189                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2190                 netif_info(bp, drv, bp->dev,
2191                            "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2192                            fw_health->master, fw_health->last_fw_reset_cnt,
2193                            bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2194                 goto async_event_process_exit;
2195         }
2196         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2197                 netif_notice(bp, hw, bp->dev,
2198                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2199                              data1, data2);
2200                 goto async_event_process_exit;
2201         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2202                 struct bnxt_rx_ring_info *rxr;
2203                 u16 grp_idx;
2204
2205                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2206                         goto async_event_process_exit;
2207
2208                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2209                             BNXT_EVENT_RING_TYPE(data2), data1);
2210                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2211                         goto async_event_process_exit;
2212
2213                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2214                 if (grp_idx == INVALID_HW_RING_ID) {
2215                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2216                                     data1);
2217                         goto async_event_process_exit;
2218                 }
2219                 rxr = bp->bnapi[grp_idx]->rx_ring;
2220                 bnxt_sched_reset(bp, rxr);
2221                 goto async_event_process_exit;
2222         }
2223         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2224                 struct bnxt_fw_health *fw_health = bp->fw_health;
2225
2226                 netif_notice(bp, hw, bp->dev,
2227                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2228                              data1, data2);
2229                 if (fw_health) {
2230                         fw_health->echo_req_data1 = data1;
2231                         fw_health->echo_req_data2 = data2;
2232                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2233                         break;
2234                 }
2235                 goto async_event_process_exit;
2236         }
2237         default:
2238                 goto async_event_process_exit;
2239         }
2240         bnxt_queue_sp_work(bp);
2241 async_event_process_exit:
2242         bnxt_ulp_async_events(bp, cmpl);
2243         return 0;
2244 }
2245
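/* Dispatch an HWRM completion: match HWRM_DONE against the pending
 * sequence id, forward VF requests to the PF slow path, and hand async
 * events to bnxt_async_event_process().
 */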
2246 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2247 {
2248         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2249         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2250         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2251                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2252
2253         switch (cmpl_type) {
2254         case CMPL_BASE_TYPE_HWRM_DONE:
2255                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2256                 if (seq_id == bp->hwrm_intr_seq_id)
2257                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2258                 else
2259                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2260                 break;
2261
2262         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2263                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2264
2265                 if ((vf_id < bp->pf.first_vf_id) ||
2266                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2267                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2268                                    vf_id);
2269                         return -EINVAL;
2270                 }
2271
2272                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2273                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2274                 bnxt_queue_sp_work(bp);
2275                 break;
2276
2277         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2278                 bnxt_async_event_process(bp,
2279                                          (struct hwrm_async_event_cmpl *)txcmp);
2280                 break;
2281
2282         default:
2283                 break;
2284         }
2285
2286         return 0;
2287 }
2288
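/* MSI-X handler, one vector per completion ring: just schedule NAPI; all
 * the real work happens in the poll routines.
 */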
2289 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2290 {
2291         struct bnxt_napi *bnapi = dev_instance;
2292         struct bnxt *bp = bnapi->bp;
2293         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2294         u32 cons = RING_CMP(cpr->cp_raw_cons);
2295
2296         cpr->event_ctr++;
2297         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2298         napi_schedule(&bnapi->napi);
2299         return IRQ_HANDLED;
2300 }
2301
2302 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2303 {
2304         u32 raw_cons = cpr->cp_raw_cons;
2305         u16 cons = RING_CMP(raw_cons);
2306         struct tx_cmp *txcmp;
2307
2308         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2309
2310         return TX_CMP_VALID(txcmp, raw_cons);
2311 }
2312
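/* Legacy INTx handler.  The line may be shared, so when the ring shows no
 * work, check the chip's interrupt status bit before claiming the
 * interrupt; then disable the ring IRQ and schedule NAPI.
 */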
2313 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2314 {
2315         struct bnxt_napi *bnapi = dev_instance;
2316         struct bnxt *bp = bnapi->bp;
2317         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2318         u32 cons = RING_CMP(cpr->cp_raw_cons);
2319         u32 int_status;
2320
2321         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2322
2323         if (!bnxt_has_work(bp, cpr)) {
2324                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2325                 /* return if this is a spurious interrupt */
2326                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2327                         return IRQ_NONE;
2328         }
2329
2330         /* disable ring IRQ */
2331         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2332
2333         /* Return here if interrupt is shared and is disabled. */
2334         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2335                 return IRQ_HANDLED;
2336
2337         napi_schedule(&bnapi->napi);
2338         return IRQ_HANDLED;
2339 }
2340
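/* Core completion ring poll loop shared by all NAPI variants.  Walks the
 * ring until the budget is exhausted or no valid entry remains, handling
 * TX completions, RX/TPA completions and HWRM/async events.  RX and agg
 * doorbell updates are deferred to __bnxt_poll_work_done().
 */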
2341 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2342                             int budget)
2343 {
2344         struct bnxt_napi *bnapi = cpr->bnapi;
2345         u32 raw_cons = cpr->cp_raw_cons;
2346         u32 cons;
2347         int tx_pkts = 0;
2348         int rx_pkts = 0;
2349         u8 event = 0;
2350         struct tx_cmp *txcmp;
2351
2352         cpr->has_more_work = 0;
2353         cpr->had_work_done = 1;
2354         while (1) {
2355                 int rc;
2356
2357                 cons = RING_CMP(raw_cons);
2358                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2359
2360                 if (!TX_CMP_VALID(txcmp, raw_cons))
2361                         break;
2362
2363                 /* The validity test of the entry must be done before
2364                  * reading any further.
2365                  */
2366                 dma_rmb();
2367                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2368                         tx_pkts++;
2369                         /* return full budget so NAPI will complete. */
2370                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2371                                 rx_pkts = budget;
2372                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2373                                 if (budget)
2374                                         cpr->has_more_work = 1;
2375                                 break;
2376                         }
2377                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2378                         if (likely(budget))
2379                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2380                         else
2381                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2382                                                            &event);
2383                         if (likely(rc >= 0))
2384                                 rx_pkts += rc;
2385                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2386                          * the NAPI budget.  Otherwise, we may potentially loop
2387                          * here forever if we consistently cannot allocate
2388                          * buffers.
2389                          */
2390                         else if (rc == -ENOMEM && budget)
2391                                 rx_pkts++;
2392                         else if (rc == -EBUSY)  /* partial completion */
2393                                 break;
2394                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2395                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2396                                     (TX_CMP_TYPE(txcmp) ==
2397                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2398                                     (TX_CMP_TYPE(txcmp) ==
2399                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2400                         bnxt_hwrm_handler(bp, txcmp);
2401                 }
2402                 raw_cons = NEXT_RAW_CMP(raw_cons);
2403
2404                 if (rx_pkts && rx_pkts == budget) {
2405                         cpr->has_more_work = 1;
2406                         break;
2407                 }
2408         }
2409
2410         if (event & BNXT_REDIRECT_EVENT)
2411                 xdp_do_flush_map();
2412
2413         if (event & BNXT_TX_EVENT) {
2414                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2415                 u16 prod = txr->tx_prod;
2416
2417                 /* Sync BD data before updating doorbell */
2418                 wmb();
2419
2420                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2421         }
2422
2423         cpr->cp_raw_cons = raw_cons;
2424         bnapi->tx_pkts += tx_pkts;
2425         bnapi->events |= event;
2426         return rx_pkts;
2427 }
2428
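/* Post-poll housekeeping: reclaim finished TX buffers and ring the RX and
 * aggregation doorbells for any buffers produced during the poll.
 */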
2429 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2430 {
2431         if (bnapi->tx_pkts) {
2432                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2433                 bnapi->tx_pkts = 0;
2434         }
2435
2436         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2437                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2438
2439                 if (bnapi->events & BNXT_AGG_EVENT)
2440                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2441                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2442         }
2443         bnapi->events = 0;
2444 }
2445
2446 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2447                           int budget)
2448 {
2449         struct bnxt_napi *bnapi = cpr->bnapi;
2450         int rx_pkts;
2451
2452         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2453
2454         /* ACK completion ring before freeing tx ring and producing new
2455          * buffers in rx/agg rings to prevent overflowing the completion
2456          * ring.
2457          */
2458         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2459
2460         __bnxt_poll_work_done(bp, bnapi);
2461         return rx_pkts;
2462 }
2463
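/* NAPI poll handler for the Nitro A0 special completion ring.  RX
 * completions here are always forced to an error status so that
 * bnxt_rx_pkt() recycles the buffers instead of delivering the packets.
 */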
2464 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2465 {
2466         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2467         struct bnxt *bp = bnapi->bp;
2468         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2469         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2470         struct tx_cmp *txcmp;
2471         struct rx_cmp_ext *rxcmp1;
2472         u32 cp_cons, tmp_raw_cons;
2473         u32 raw_cons = cpr->cp_raw_cons;
2474         u32 rx_pkts = 0;
2475         u8 event = 0;
2476
2477         while (1) {
2478                 int rc;
2479
2480                 cp_cons = RING_CMP(raw_cons);
2481                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2482
2483                 if (!TX_CMP_VALID(txcmp, raw_cons))
2484                         break;
2485
2486                 /* The validity test of the entry must be done before
2487                  * reading any further.
2488                  */
2489                 dma_rmb();
2490                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2491                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2492                         cp_cons = RING_CMP(tmp_raw_cons);
2493                         rxcmp1 = (struct rx_cmp_ext *)
2494                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2495
2496                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2497                                 break;
2498
2499                         /* force an error to recycle the buffer */
2500                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2501                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2502
2503                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2504                         if (likely(rc == -EIO) && budget)
2505                                 rx_pkts++;
2506                         else if (rc == -EBUSY)  /* partial completion */
2507                                 break;
2508                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2509                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2510                         bnxt_hwrm_handler(bp, txcmp);
2511                 } else {
2512                         netdev_err(bp->dev,
2513                                    "Invalid completion received on special ring\n");
2514                 }
2515                 raw_cons = NEXT_RAW_CMP(raw_cons);
2516
2517                 if (rx_pkts == budget)
2518                         break;
2519         }
2520
2521         cpr->cp_raw_cons = raw_cons;
2522         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2523         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2524
2525         if (event & BNXT_AGG_EVENT)
2526                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2527
2528         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2529                 napi_complete_done(napi, rx_pkts);
2530                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2531         }
2532         return rx_pkts;
2533 }
2534
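/* Main NAPI poll routine for non-P5 chips: run the poll loop until the
 * budget is consumed or the ring goes idle, then re-arm the completion
 * ring.  Also feeds net_dim() when dynamic interrupt moderation is on.
 */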
2535 static int bnxt_poll(struct napi_struct *napi, int budget)
2536 {
2537         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2538         struct bnxt *bp = bnapi->bp;
2539         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2540         int work_done = 0;
2541
2542         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2543                 napi_complete(napi);
2544                 return 0;
2545         }
2546         while (1) {
2547                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2548
2549                 if (work_done >= budget) {
2550                         if (!budget)
2551                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2552                         break;
2553                 }
2554
2555                 if (!bnxt_has_work(bp, cpr)) {
2556                         if (napi_complete_done(napi, work_done))
2557                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2558                         break;
2559                 }
2560         }
2561         if (bp->flags & BNXT_FLAG_DIM) {
2562                 struct dim_sample dim_sample = {};
2563
2564                 dim_update_sample(cpr->event_ctr,
2565                                   cpr->rx_packets,
2566                                   cpr->rx_bytes,
2567                                   &dim_sample);
2568                 net_dim(&cpr->dim, dim_sample);
2569         }
2570         return work_done;
2571 }
2572
2573 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
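/* Poll the (up to two) completion rings hanging off this NAPI's
 * notification queue, accumulating has_more_work.
 */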
2574 {
2575         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2576         int i, work_done = 0;
2577
2578         for (i = 0; i < 2; i++) {
2579                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2580
2581                 if (cpr2) {
2582                         work_done += __bnxt_poll_work(bp, cpr2,
2583                                                       budget - work_done);
2584                         cpr->has_more_work |= cpr2->has_more_work;
2585                 }
2586         }
2587         return work_done;
2588 }
2589
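/* Write the CQ doorbell (with the caller's DBR type, e.g. ARMALL when
 * re-enabling interrupts) for each completion ring that did work, then run
 * the common post-poll housekeeping.
 */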
2590 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2591                                  u64 dbr_type)
2592 {
2593         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2594         int i;
2595
2596         for (i = 0; i < 2; i++) {
2597                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2598                 struct bnxt_db_info *db;
2599
2600                 if (cpr2 && cpr2->had_work_done) {
2601                         db = &cpr2->cp_db;
2602                         writeq(db->db_key64 | dbr_type |
2603                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2604                         cpr2->had_work_done = 0;
2605                 }
2606         }
2607         __bnxt_poll_work_done(bp, bnapi);
2608 }
2609
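/* NAPI poll routine for P5 chips.  The NAPI instance services a
 * notification queue (NQ); each NQ entry points to one of the two
 * completion rings in cp_ring_arr, which is then polled individually.
 */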
2610 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2611 {
2612         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2613         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2614         u32 raw_cons = cpr->cp_raw_cons;
2615         struct bnxt *bp = bnapi->bp;
2616         struct nqe_cn *nqcmp;
2617         int work_done = 0;
2618         u32 cons;
2619
2620         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2621                 napi_complete(napi);
2622                 return 0;
2623         }
2624         if (cpr->has_more_work) {
2625                 cpr->has_more_work = 0;
2626                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2627         }
2628         while (1) {
2629                 cons = RING_CMP(raw_cons);
2630                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2631
2632                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2633                         if (cpr->has_more_work)
2634                                 break;
2635
2636                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2637                         cpr->cp_raw_cons = raw_cons;
2638                         if (napi_complete_done(napi, work_done))
2639                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2640                                                   cpr->cp_raw_cons);
2641                         return work_done;
2642                 }
2643
2644                 /* The validity test of the entry must be done before
2645                  * reading any further.
2646                  */
2647                 dma_rmb();
2648
2649                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2650                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2651                         struct bnxt_cp_ring_info *cpr2;
2652
2653                         cpr2 = cpr->cp_ring_arr[idx];
2654                         work_done += __bnxt_poll_work(bp, cpr2,
2655                                                       budget - work_done);
2656                         cpr->has_more_work |= cpr2->has_more_work;
2657                 } else {
2658                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2659                 }
2660                 raw_cons = NEXT_RAW_CMP(raw_cons);
2661         }
2662         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2663         if (raw_cons != cpr->cp_raw_cons) {
2664                 cpr->cp_raw_cons = raw_cons;
2665                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2666         }
2667         return work_done;
2668 }
2669
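/* Unmap and free every pending TX buffer on all TX rings, including queued
 * XDP_REDIRECT frames and push-mode packets, and reset each queue's BQL
 * state.
 */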
2670 static void bnxt_free_tx_skbs(struct bnxt *bp)
2671 {
2672         int i, max_idx;
2673         struct pci_dev *pdev = bp->pdev;
2674
2675         if (!bp->tx_ring)
2676                 return;
2677
2678         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2679         for (i = 0; i < bp->tx_nr_rings; i++) {
2680                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2681                 int j;
2682
2683                 for (j = 0; j < max_idx;) {
2684                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2685                         struct sk_buff *skb;
2686                         int k, last;
2687
2688                         if (i < bp->tx_nr_rings_xdp &&
2689                             tx_buf->action == XDP_REDIRECT) {
2690                                 dma_unmap_single(&pdev->dev,
2691                                         dma_unmap_addr(tx_buf, mapping),
2692                                         dma_unmap_len(tx_buf, len),
2693                                         DMA_TO_DEVICE);
2694                                 xdp_return_frame(tx_buf->xdpf);
2695                                 tx_buf->action = 0;
2696                                 tx_buf->xdpf = NULL;
2697                                 j++;
2698                                 continue;
2699                         }
2700
2701                         skb = tx_buf->skb;
2702                         if (!skb) {
2703                                 j++;
2704                                 continue;
2705                         }
2706
2707                         tx_buf->skb = NULL;
2708
2709                         if (tx_buf->is_push) {
2710                                 dev_kfree_skb(skb);
2711                                 j += 2;
2712                                 continue;
2713                         }
2714
2715                         dma_unmap_single(&pdev->dev,
2716                                          dma_unmap_addr(tx_buf, mapping),
2717                                          skb_headlen(skb),
2718                                          DMA_TO_DEVICE);
2719
2720                         last = tx_buf->nr_frags;
2721                         j += 2;
2722                         for (k = 0; k < last; k++, j++) {
2723                                 int ring_idx = j & bp->tx_ring_mask;
2724                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2725
2726                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2727                                 dma_unmap_page(
2728                                         &pdev->dev,
2729                                         dma_unmap_addr(tx_buf, mapping),
2730                                         skb_frag_size(frag), DMA_TO_DEVICE);
2731                         }
2732                         dev_kfree_skb(skb);
2733                 }
2734                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2735         }
2736 }
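
/* Note on the index arithmetic above: a packet's first buffer occupies
 * a long BD pair (hence j += 2), followed by one BD per page fragment,
 * mirroring the layout produced at transmit time.  A "push" packet was
 * copied inline and holds no DMA mappings, so only its skb is freed.
 */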
2737
2738 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2739 {
2740         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2741         struct pci_dev *pdev = bp->pdev;
2742         struct bnxt_tpa_idx_map *map;
2743         int i, max_idx, max_agg_idx;
2744
2745         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2746         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2747         if (!rxr->rx_tpa)
2748                 goto skip_rx_tpa_free;
2749
2750         for (i = 0; i < bp->max_tpa; i++) {
2751                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2752                 u8 *data = tpa_info->data;
2753
2754                 if (!data)
2755                         continue;
2756
2757                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2758                                        bp->rx_buf_use_size, bp->rx_dir,
2759                                        DMA_ATTR_WEAK_ORDERING);
2760
2761                 tpa_info->data = NULL;
2762
2763                 kfree(data);
2764         }
2765
2766 skip_rx_tpa_free:
2767         for (i = 0; i < max_idx; i++) {
2768                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2769                 dma_addr_t mapping = rx_buf->mapping;
2770                 void *data = rx_buf->data;
2771
2772                 if (!data)
2773                         continue;
2774
2775                 rx_buf->data = NULL;
2776                 if (BNXT_RX_PAGE_MODE(bp)) {
2777                         mapping -= bp->rx_dma_offset;
2778                         dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2779                                              bp->rx_dir,
2780                                              DMA_ATTR_WEAK_ORDERING);
2781                         page_pool_recycle_direct(rxr->page_pool, data);
2782                 } else {
2783                         dma_unmap_single_attrs(&pdev->dev, mapping,
2784                                                bp->rx_buf_use_size, bp->rx_dir,
2785                                                DMA_ATTR_WEAK_ORDERING);
2786                         kfree(data);
2787                 }
2788         }
2789         for (i = 0; i < max_agg_idx; i++) {
2790                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2791                 struct page *page = rx_agg_buf->page;
2792
2793                 if (!page)
2794                         continue;
2795
2796                 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2797                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2798                                      DMA_ATTR_WEAK_ORDERING);
2799
2800                 rx_agg_buf->page = NULL;
2801                 __clear_bit(i, rxr->rx_agg_bmap);
2802
2803                 __free_page(page);
2804         }
2805         if (rxr->rx_page) {
2806                 __free_page(rxr->rx_page);
2807                 rxr->rx_page = NULL;
2808         }
2809         map = rxr->rx_tpa_idx_map;
2810         if (map)
2811                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2812 }
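
/* In page mode the stored mapping points at the packet data rather
 * than the start of the page, so bp->rx_dma_offset is subtracted above
 * to recover the page-aligned DMA address before unmapping.
 */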
2813
2814 static void bnxt_free_rx_skbs(struct bnxt *bp)
2815 {
2816         int i;
2817
2818         if (!bp->rx_ring)
2819                 return;
2820
2821         for (i = 0; i < bp->rx_nr_rings; i++)
2822                 bnxt_free_one_rx_ring_skbs(bp, i);
2823 }
2824
2825 static void bnxt_free_skbs(struct bnxt *bp)
2826 {
2827         bnxt_free_tx_skbs(bp);
2828         bnxt_free_rx_skbs(bp);
2829 }
2830
2831 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2832 {
2833         u8 init_val = mem_init->init_val;
2834         u16 offset = mem_init->offset;
2835         u8 *p2 = p;
2836         int i;
2837
2838         if (!init_val)
2839                 return;
2840         if (offset == BNXT_MEM_INVALID_OFFSET) {
2841                 memset(p, init_val, len);
2842                 return;
2843         }
2844         for (i = 0; i < len; i += mem_init->size)
2845                 *(p2 + i + offset) = init_val;
2846 }
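
/* Worked example (illustrative): for a context type with
 * mem_init->size = 32, offset = 4 and init_val = 0xff, the loop above
 * writes 0xff into byte 4 of every 32-byte record, i.e. p[4], p[36],
 * p[68], ...; with offset == BNXT_MEM_INVALID_OFFSET the whole block
 * is memset() instead.
 */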
2847
2848 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2849 {
2850         struct pci_dev *pdev = bp->pdev;
2851         int i;
2852
2853         for (i = 0; i < rmem->nr_pages; i++) {
2854                 if (!rmem->pg_arr[i])
2855                         continue;
2856
2857                 dma_free_coherent(&pdev->dev, rmem->page_size,
2858                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2859
2860                 rmem->pg_arr[i] = NULL;
2861         }
2862         if (rmem->pg_tbl) {
2863                 size_t pg_tbl_size = rmem->nr_pages * 8;
2864
2865                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2866                         pg_tbl_size = rmem->page_size;
2867                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2868                                   rmem->pg_tbl, rmem->pg_tbl_map);
2869                 rmem->pg_tbl = NULL;
2870         }
2871         if (rmem->vmem_size && *rmem->vmem) {
2872                 vfree(*rmem->vmem);
2873                 *rmem->vmem = NULL;
2874         }
2875 }
2876
2877 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2878 {
2879         struct pci_dev *pdev = bp->pdev;
2880         u64 valid_bit = 0;
2881         int i;
2882
2883         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2884                 valid_bit = PTU_PTE_VALID;
2885         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2886                 size_t pg_tbl_size = rmem->nr_pages * 8;
2887
2888                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2889                         pg_tbl_size = rmem->page_size;
2890                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2891                                                   &rmem->pg_tbl_map,
2892                                                   GFP_KERNEL);
2893                 if (!rmem->pg_tbl)
2894                         return -ENOMEM;
2895         }
2896
2897         for (i = 0; i < rmem->nr_pages; i++) {
2898                 u64 extra_bits = valid_bit;
2899
2900                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2901                                                      rmem->page_size,
2902                                                      &rmem->dma_arr[i],
2903                                                      GFP_KERNEL);
2904                 if (!rmem->pg_arr[i])
2905                         return -ENOMEM;
2906
2907                 if (rmem->mem_init)
2908                         bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2909                                           rmem->page_size);
2910                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2911                         if (i == rmem->nr_pages - 2 &&
2912                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2913                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2914                         else if (i == rmem->nr_pages - 1 &&
2915                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2916                                 extra_bits |= PTU_PTE_LAST;
2917                         rmem->pg_tbl[i] =
2918                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2919                 }
2920         }
2921
2922         if (rmem->vmem_size) {
2923                 *rmem->vmem = vzalloc(rmem->vmem_size);
2924                 if (!(*rmem->vmem))
2925                         return -ENOMEM;
2926         }
2927         return 0;
2928 }
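
/* Illustrative page-table layout for an N-page ring allocated above
 * with BNXT_RMEM_RING_PTE_FLAG set:
 *
 *	pg_tbl[0]     = dma_arr[0]     | PTU_PTE_VALID
 *	...
 *	pg_tbl[N - 2] = dma_arr[N - 2] | PTU_PTE_VALID | PTU_PTE_NEXT_TO_LAST
 *	pg_tbl[N - 1] = dma_arr[N - 1] | PTU_PTE_VALID | PTU_PTE_LAST
 *
 * The LAST/NEXT_TO_LAST markers let the chip detect the wrap point
 * when walking the ring's page table.
 */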
2929
2930 static void bnxt_free_tpa_info(struct bnxt *bp)
2931 {
2932         int i;
2933
2934         for (i = 0; i < bp->rx_nr_rings; i++) {
2935                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2936
2937                 kfree(rxr->rx_tpa_idx_map);
2938                 rxr->rx_tpa_idx_map = NULL;
2939                 if (rxr->rx_tpa) {
2940                         kfree(rxr->rx_tpa[0].agg_arr);
2941                         rxr->rx_tpa[0].agg_arr = NULL;
2942                 }
2943                 kfree(rxr->rx_tpa);
2944                 rxr->rx_tpa = NULL;
2945         }
2946 }
2947
2948 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2949 {
2950         int i, j, total_aggs = 0;
2951
2952         bp->max_tpa = MAX_TPA;
2953         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2954                 if (!bp->max_tpa_v2)
2955                         return 0;
2956                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2957                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2958         }
2959
2960         for (i = 0; i < bp->rx_nr_rings; i++) {
2961                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2962                 struct rx_agg_cmp *agg;
2963
2964                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2965                                       GFP_KERNEL);
2966                 if (!rxr->rx_tpa)
2967                         return -ENOMEM;
2968
2969                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2970                         continue;
2971                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2972                 rxr->rx_tpa[0].agg_arr = agg;
2973                 if (!agg)
2974                         return -ENOMEM;
2975                 for (j = 1; j < bp->max_tpa; j++)
2976                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2977                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2978                                               GFP_KERNEL);
2979                 if (!rxr->rx_tpa_idx_map)
2980                         return -ENOMEM;
2981         }
2982         return 0;
2983 }
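
/* Note: on P5 chips a single array of max_tpa * MAX_SKB_FRAGS
 * aggregation entries is allocated above and carved into per-slot
 * slices, TPA slot j using agg_arr + j * MAX_SKB_FRAGS.  This is why
 * bnxt_free_tpa_info() frees only rx_tpa[0].agg_arr.
 */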
2984
2985 static void bnxt_free_rx_rings(struct bnxt *bp)
2986 {
2987         int i;
2988
2989         if (!bp->rx_ring)
2990                 return;
2991
2992         bnxt_free_tpa_info(bp);
2993         for (i = 0; i < bp->rx_nr_rings; i++) {
2994                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2995                 struct bnxt_ring_struct *ring;
2996
2997                 if (rxr->xdp_prog)
2998                         bpf_prog_put(rxr->xdp_prog);
2999
3000                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3001                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3002
3003                 page_pool_destroy(rxr->page_pool);
3004                 rxr->page_pool = NULL;
3005
3006                 kfree(rxr->rx_agg_bmap);
3007                 rxr->rx_agg_bmap = NULL;
3008
3009                 ring = &rxr->rx_ring_struct;
3010                 bnxt_free_ring(bp, &ring->ring_mem);
3011
3012                 ring = &rxr->rx_agg_ring_struct;
3013                 bnxt_free_ring(bp, &ring->ring_mem);
3014         }
3015 }
3016
3017 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3018                                    struct bnxt_rx_ring_info *rxr)
3019 {
3020         struct page_pool_params pp = { 0 };
3021
3022         pp.pool_size = bp->rx_ring_size;
3023         pp.nid = dev_to_node(&bp->pdev->dev);
3024         pp.dev = &bp->pdev->dev;
3025         pp.dma_dir = DMA_BIDIRECTIONAL;
3026
3027         rxr->page_pool = page_pool_create(&pp);
3028         if (IS_ERR(rxr->page_pool)) {
3029                 int err = PTR_ERR(rxr->page_pool);
3030
3031                 rxr->page_pool = NULL;
3032                 return err;
3033         }
3034         return 0;
3035 }
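
/* The pool created above only recycles pages (PP_FLAG_DMA_MAP is not
 * set), so the driver still maps and unmaps each page itself.  Sizing
 * the pool to rx_ring_size keeps roughly one recyclable page available
 * per RX descriptor.
 */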
3036
3037 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3038 {
3039         int i, rc = 0, agg_rings = 0;
3040
3041         if (!bp->rx_ring)
3042                 return -ENOMEM;
3043
3044         if (bp->flags & BNXT_FLAG_AGG_RINGS)
3045                 agg_rings = 1;
3046
3047         for (i = 0; i < bp->rx_nr_rings; i++) {
3048                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3049                 struct bnxt_ring_struct *ring;
3050
3051                 ring = &rxr->rx_ring_struct;
3052
3053                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3054                 if (rc)
3055                         return rc;
3056
3057                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3058                 if (rc < 0)
3059                         return rc;
3060
3061                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3062                                                 MEM_TYPE_PAGE_POOL,
3063                                                 rxr->page_pool);
3064                 if (rc) {
3065                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3066                         return rc;
3067                 }
3068
3069                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3070                 if (rc)
3071                         return rc;
3072
3073                 ring->grp_idx = i;
3074                 if (agg_rings) {
3075                         u16 mem_size;
3076
3077                         ring = &rxr->rx_agg_ring_struct;
3078                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3079                         if (rc)
3080                                 return rc;
3081
3082                         ring->grp_idx = i;
3083                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3084                         mem_size = rxr->rx_agg_bmap_size / 8;
3085                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3086                         if (!rxr->rx_agg_bmap)
3087                                 return -ENOMEM;
3088                 }
3089         }
3090         if (bp->flags & BNXT_FLAG_TPA)
3091                 rc = bnxt_alloc_tpa_info(bp);
3092         return rc;
3093 }
3094
3095 static void bnxt_free_tx_rings(struct bnxt *bp)
3096 {
3097         int i;
3098         struct pci_dev *pdev = bp->pdev;
3099
3100         if (!bp->tx_ring)
3101                 return;
3102
3103         for (i = 0; i < bp->tx_nr_rings; i++) {
3104                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3105                 struct bnxt_ring_struct *ring;
3106
3107                 if (txr->tx_push) {
3108                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3109                                           txr->tx_push, txr->tx_push_mapping);
3110                         txr->tx_push = NULL;
3111                 }
3112
3113                 ring = &txr->tx_ring_struct;
3114
3115                 bnxt_free_ring(bp, &ring->ring_mem);
3116         }
3117 }
3118
3119 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3120 {
3121         int i, j, rc;
3122         struct pci_dev *pdev = bp->pdev;
3123
3124         bp->tx_push_size = 0;
3125         if (bp->tx_push_thresh) {
3126                 int push_size;
3127
3128                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3129                                         bp->tx_push_thresh);
3130
3131                 if (push_size > 256) {
3132                         push_size = 0;
3133                         bp->tx_push_thresh = 0;
3134                 }
3135
3136                 bp->tx_push_size = push_size;
3137         }
3138
3139         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3140                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3141                 struct bnxt_ring_struct *ring;
3142                 u8 qidx;
3143
3144                 ring = &txr->tx_ring_struct;
3145
3146                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3147                 if (rc)
3148                         return rc;
3149
3150                 ring->grp_idx = txr->bnapi->index;
3151                 if (bp->tx_push_size) {
3152                         dma_addr_t mapping;
3153
3154                         /* One pre-allocated DMA buffer to back up
3155                          * the TX push operation
3156                          */
3157                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3158                                                 bp->tx_push_size,
3159                                                 &txr->tx_push_mapping,
3160                                                 GFP_KERNEL);
3161
3162                         if (!txr->tx_push)
3163                                 return -ENOMEM;
3164
3165                         mapping = txr->tx_push_mapping +
3166                                 sizeof(struct tx_push_bd);
3167                         txr->data_mapping = cpu_to_le64(mapping);
3168                 }
3169                 qidx = bp->tc_to_qidx[j];
3170                 ring->queue_id = bp->q_info[qidx].queue_id;
3171                 if (i < bp->tx_nr_rings_xdp)
3172                         continue;
3173                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3174                         j++;
3175         }
3176         return 0;
3177 }
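
/* TX push, sketched: frames shorter than tx_push_thresh can be written
 * together with their BDs straight into the doorbell BAR; the coherent
 * tx_push buffer allocated above holds the pre-built BD-plus-data image
 * for that write.  The j/qidx bookkeeping maps each non-XDP ring to the
 * firmware queue id of its traffic class.
 */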
3178
3179 static void bnxt_free_cp_rings(struct bnxt *bp)
3180 {
3181         int i;
3182
3183         if (!bp->bnapi)
3184                 return;
3185
3186         for (i = 0; i < bp->cp_nr_rings; i++) {
3187                 struct bnxt_napi *bnapi = bp->bnapi[i];
3188                 struct bnxt_cp_ring_info *cpr;
3189                 struct bnxt_ring_struct *ring;
3190                 int j;
3191
3192                 if (!bnapi)
3193                         continue;
3194
3195                 cpr = &bnapi->cp_ring;
3196                 ring = &cpr->cp_ring_struct;
3197
3198                 bnxt_free_ring(bp, &ring->ring_mem);
3199
3200                 for (j = 0; j < 2; j++) {
3201                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3202
3203                         if (cpr2) {
3204                                 ring = &cpr2->cp_ring_struct;
3205                                 bnxt_free_ring(bp, &ring->ring_mem);
3206                                 kfree(cpr2);
3207                                 cpr->cp_ring_arr[j] = NULL;
3208                         }
3209                 }
3210         }
3211 }
3212
3213 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3214 {
3215         struct bnxt_ring_mem_info *rmem;
3216         struct bnxt_ring_struct *ring;
3217         struct bnxt_cp_ring_info *cpr;
3218         int rc;
3219
3220         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3221         if (!cpr)
3222                 return NULL;
3223
3224         ring = &cpr->cp_ring_struct;
3225         rmem = &ring->ring_mem;
3226         rmem->nr_pages = bp->cp_nr_pages;
3227         rmem->page_size = HW_CMPD_RING_SIZE;
3228         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3229         rmem->dma_arr = cpr->cp_desc_mapping;
3230         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3231         rc = bnxt_alloc_ring(bp, rmem);
3232         if (rc) {
3233                 bnxt_free_ring(bp, rmem);
3234                 kfree(cpr);
3235                 cpr = NULL;
3236         }
3237         return cpr;
3238 }
3239
3240 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3241 {
3242         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3243         int i, rc, ulp_base_vec, ulp_msix;
3244
3245         ulp_msix = bnxt_get_ulp_msix_num(bp);
3246         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3247         for (i = 0; i < bp->cp_nr_rings; i++) {
3248                 struct bnxt_napi *bnapi = bp->bnapi[i];
3249                 struct bnxt_cp_ring_info *cpr;
3250                 struct bnxt_ring_struct *ring;
3251
3252                 if (!bnapi)
3253                         continue;
3254
3255                 cpr = &bnapi->cp_ring;
3256                 cpr->bnapi = bnapi;
3257                 ring = &cpr->cp_ring_struct;
3258
3259                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3260                 if (rc)
3261                         return rc;
3262
3263                 if (ulp_msix && i >= ulp_base_vec)
3264                         ring->map_idx = i + ulp_msix;
3265                 else
3266                         ring->map_idx = i;
3267
3268                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3269                         continue;
3270
3271                 if (i < bp->rx_nr_rings) {
3272                         struct bnxt_cp_ring_info *cpr2 =
3273                                 bnxt_alloc_cp_sub_ring(bp);
3274
3275                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3276                         if (!cpr2)
3277                                 return -ENOMEM;
3278                         cpr2->bnapi = bnapi;
3279                 }
3280                 if ((sh && i < bp->tx_nr_rings) ||
3281                     (!sh && i >= bp->rx_nr_rings)) {
3282                         struct bnxt_cp_ring_info *cpr2 =
3283                                 bnxt_alloc_cp_sub_ring(bp);
3284
3285                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3286                         if (!cpr2)
3287                                 return -ENOMEM;
3288                         cpr2->bnapi = bnapi;
3289                 }
3290         }
3291         return 0;
3292 }
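
/* Resulting P5 layout, roughly: each NQ owns up to two CQ sub-rings,
 * cp_ring_arr[BNXT_RX_HDL] for RX completions and
 * cp_ring_arr[BNXT_TX_HDL] for TX completions.  With shared rings the
 * first tx_nr_rings NQs carry both handles; otherwise the TX CQs sit
 * on the NQs that follow the RX range.
 */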
3293
3294 static void bnxt_init_ring_struct(struct bnxt *bp)
3295 {
3296         int i;
3297
3298         for (i = 0; i < bp->cp_nr_rings; i++) {
3299                 struct bnxt_napi *bnapi = bp->bnapi[i];
3300                 struct bnxt_ring_mem_info *rmem;
3301                 struct bnxt_cp_ring_info *cpr;
3302                 struct bnxt_rx_ring_info *rxr;
3303                 struct bnxt_tx_ring_info *txr;
3304                 struct bnxt_ring_struct *ring;
3305
3306                 if (!bnapi)
3307                         continue;
3308
3309                 cpr = &bnapi->cp_ring;
3310                 ring = &cpr->cp_ring_struct;
3311                 rmem = &ring->ring_mem;
3312                 rmem->nr_pages = bp->cp_nr_pages;
3313                 rmem->page_size = HW_CMPD_RING_SIZE;
3314                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3315                 rmem->dma_arr = cpr->cp_desc_mapping;
3316                 rmem->vmem_size = 0;
3317
3318                 rxr = bnapi->rx_ring;
3319                 if (!rxr)
3320                         goto skip_rx;
3321
3322                 ring = &rxr->rx_ring_struct;
3323                 rmem = &ring->ring_mem;
3324                 rmem->nr_pages = bp->rx_nr_pages;
3325                 rmem->page_size = HW_RXBD_RING_SIZE;
3326                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3327                 rmem->dma_arr = rxr->rx_desc_mapping;
3328                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3329                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3330
3331                 ring = &rxr->rx_agg_ring_struct;
3332                 rmem = &ring->ring_mem;
3333                 rmem->nr_pages = bp->rx_agg_nr_pages;
3334                 rmem->page_size = HW_RXBD_RING_SIZE;
3335                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3336                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3337                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3338                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3339
3340 skip_rx:
3341                 txr = bnapi->tx_ring;
3342                 if (!txr)
3343                         continue;
3344
3345                 ring = &txr->tx_ring_struct;
3346                 rmem = &ring->ring_mem;
3347                 rmem->nr_pages = bp->tx_nr_pages;
3348                 rmem->page_size = HW_RXBD_RING_SIZE;
3349                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3350                 rmem->dma_arr = txr->tx_desc_mapping;
3351                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3352                 rmem->vmem = (void **)&txr->tx_buf_ring;
3353         }
3354 }
3355
3356 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3357 {
3358         int i;
3359         u32 prod;
3360         struct rx_bd **rx_buf_ring;
3361
3362         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3363         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3364                 int j;
3365                 struct rx_bd *rxbd;
3366
3367                 rxbd = rx_buf_ring[i];
3368                 if (!rxbd)
3369                         continue;
3370
3371                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3372                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3373                         rxbd->rx_bd_opaque = prod;
3374                 }
3375         }
3376 }
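
/* The rx_bd_opaque seeded above is simply the producer index of the
 * descriptor; the chip echoes it back in the RX completion, letting
 * the driver find the matching software buffer without a lookup.
 */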
3377
3378 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3379 {
3380         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3381         struct net_device *dev = bp->dev;
3382         u32 prod;
3383         int i;
3384
3385         prod = rxr->rx_prod;
3386         for (i = 0; i < bp->rx_ring_size; i++) {
3387                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3388                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3389                                     ring_nr, i, bp->rx_ring_size);
3390                         break;
3391                 }
3392                 prod = NEXT_RX(prod);
3393         }
3394         rxr->rx_prod = prod;
3395
3396         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3397                 return 0;
3398
3399         prod = rxr->rx_agg_prod;
3400         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3401                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3402                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3403                                     ring_nr, i, bp->rx_agg_ring_size);
3404                         break;
3405                 }
3406                 prod = NEXT_RX_AGG(prod);
3407         }
3408         rxr->rx_agg_prod = prod;
3409
3410         if (rxr->rx_tpa) {
3411                 dma_addr_t mapping;
3412                 u8 *data;
3413
3414                 for (i = 0; i < bp->max_tpa; i++) {
3415                         data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3416                         if (!data)
3417                                 return -ENOMEM;
3418
3419                         rxr->rx_tpa[i].data = data;
3420                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3421                         rxr->rx_tpa[i].mapping = mapping;
3422                 }
3423         }
3424         return 0;
3425 }
3426
3427 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3428 {
3429         struct bnxt_rx_ring_info *rxr;
3430         struct bnxt_ring_struct *ring;
3431         u32 type;
3432
3433         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3434                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3435
3436         if (NET_IP_ALIGN == 2)
3437                 type |= RX_BD_FLAGS_SOP;
3438
3439         rxr = &bp->rx_ring[ring_nr];
3440         ring = &rxr->rx_ring_struct;
3441         bnxt_init_rxbd_pages(ring, type);
3442
3443         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3444                 bpf_prog_add(bp->xdp_prog, 1);
3445                 rxr->xdp_prog = bp->xdp_prog;
3446         }
3447         ring->fw_ring_id = INVALID_HW_RING_ID;
3448
3449         ring = &rxr->rx_agg_ring_struct;
3450         ring->fw_ring_id = INVALID_HW_RING_ID;
3451
3452         if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3453                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3454                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3455
3456                 bnxt_init_rxbd_pages(ring, type);
3457         }
3458
3459         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3460 }
3461
3462 static void bnxt_init_cp_rings(struct bnxt *bp)
3463 {
3464         int i, j;
3465
3466         for (i = 0; i < bp->cp_nr_rings; i++) {
3467                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3468                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3469
3470                 ring->fw_ring_id = INVALID_HW_RING_ID;
3471                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3472                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3473                 for (j = 0; j < 2; j++) {
3474                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3475
3476                         if (!cpr2)
3477                                 continue;
3478
3479                         ring = &cpr2->cp_ring_struct;
3480                         ring->fw_ring_id = INVALID_HW_RING_ID;
3481                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3482                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3483                 }
3484         }
3485 }
3486
3487 static int bnxt_init_rx_rings(struct bnxt *bp)
3488 {
3489         int i, rc = 0;
3490
3491         if (BNXT_RX_PAGE_MODE(bp)) {
3492                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3493                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3494         } else {
3495                 bp->rx_offset = BNXT_RX_OFFSET;
3496                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3497         }
3498
3499         for (i = 0; i < bp->rx_nr_rings; i++) {
3500                 rc = bnxt_init_one_rx_ring(bp, i);
3501                 if (rc)
3502                         break;
3503         }
3504
3505         return rc;
3506 }
3507
3508 static int bnxt_init_tx_rings(struct bnxt *bp)
3509 {
3510         u16 i;
3511
3512         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3513                                    MAX_SKB_FRAGS + 1);
3514
3515         for (i = 0; i < bp->tx_nr_rings; i++) {
3516                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3517                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3518
3519                 ring->fw_ring_id = INVALID_HW_RING_ID;
3520         }
3521
3522         return 0;
3523 }
3524
3525 static void bnxt_free_ring_grps(struct bnxt *bp)
3526 {
3527         kfree(bp->grp_info);
3528         bp->grp_info = NULL;
3529 }
3530
3531 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3532 {
3533         int i;
3534
3535         if (irq_re_init) {
3536                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3537                                        sizeof(struct bnxt_ring_grp_info),
3538                                        GFP_KERNEL);
3539                 if (!bp->grp_info)
3540                         return -ENOMEM;
3541         }
3542         for (i = 0; i < bp->cp_nr_rings; i++) {
3543                 if (irq_re_init)
3544                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3545                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3546                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3547                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3548                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3549         }
3550         return 0;
3551 }
3552
3553 static void bnxt_free_vnics(struct bnxt *bp)
3554 {
3555         kfree(bp->vnic_info);
3556         bp->vnic_info = NULL;
3557         bp->nr_vnics = 0;
3558 }
3559
3560 static int bnxt_alloc_vnics(struct bnxt *bp)
3561 {
3562         int num_vnics = 1;
3563
3564 #ifdef CONFIG_RFS_ACCEL
3565         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3566                 num_vnics += bp->rx_nr_rings;
3567 #endif
3568
3569         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3570                 num_vnics++;
3571
3572         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3573                                 GFP_KERNEL);
3574         if (!bp->vnic_info)
3575                 return -ENOMEM;
3576
3577         bp->nr_vnics = num_vnics;
3578         return 0;
3579 }
3580
3581 static void bnxt_init_vnics(struct bnxt *bp)
3582 {
3583         int i;
3584
3585         for (i = 0; i < bp->nr_vnics; i++) {
3586                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3587                 int j;
3588
3589                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3590                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3591                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3592
3593                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3594
3595                 if (bp->vnic_info[i].rss_hash_key) {
3596                         if (i == 0)
3597                                 prandom_bytes(vnic->rss_hash_key,
3598                                               HW_HASH_KEY_SIZE);
3599                         else
3600                                 memcpy(vnic->rss_hash_key,
3601                                        bp->vnic_info[0].rss_hash_key,
3602                                        HW_HASH_KEY_SIZE);
3603                 }
3604         }
3605 }
3606
3607 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3608 {
3609         int pages;
3610
3611         pages = ring_size / desc_per_pg;
3612
3613         if (!pages)
3614                 return 1;
3615
3616         pages++;
3617
3618         while (pages & (pages - 1))
3619                 pages++;
3620
3621         return pages;
3622 }
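
/* Worked example (assuming 16-byte RX BDs on 4 KiB pages, i.e.
 * desc_per_pg == 256): ring_size = 1000 gives pages = 3, bumped to 4,
 * which is already a power of two, so 4 pages (1024 descriptors) are
 * used.  The loop rounds the page count up to a power of two because
 * the ring masks computed elsewhere assume power-of-two sizing.
 */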
3623
3624 void bnxt_set_tpa_flags(struct bnxt *bp)
3625 {
3626         bp->flags &= ~BNXT_FLAG_TPA;
3627         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3628                 return;
3629         if (bp->dev->features & NETIF_F_LRO)
3630                 bp->flags |= BNXT_FLAG_LRO;
3631         else if (bp->dev->features & NETIF_F_GRO_HW)
3632                 bp->flags |= BNXT_FLAG_GRO;
3633 }
3634
3635 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3636  * be set on entry.
3637  */
3638 void bnxt_set_ring_params(struct bnxt *bp)
3639 {
3640         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3641         u32 agg_factor = 0, agg_ring_size = 0;
3642
3643         /* 8 for CRC and VLAN */
3644         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3645
3646         rx_space = rx_size + NET_SKB_PAD +
3647                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3648
3649         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3650         ring_size = bp->rx_ring_size;
3651         bp->rx_agg_ring_size = 0;
3652         bp->rx_agg_nr_pages = 0;
3653
3654         if (bp->flags & BNXT_FLAG_TPA)
3655                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3656
3657         bp->flags &= ~BNXT_FLAG_JUMBO;
3658         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3659                 u32 jumbo_factor;
3660
3661                 bp->flags |= BNXT_FLAG_JUMBO;
3662                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3663                 if (jumbo_factor > agg_factor)
3664                         agg_factor = jumbo_factor;
3665         }
3666         agg_ring_size = ring_size * agg_factor;
3667
3668         if (agg_ring_size) {
3669                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3670                                                         RX_DESC_CNT);
3671                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3672                         u32 tmp = agg_ring_size;
3673
3674                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3675                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3676                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3677                                     tmp, agg_ring_size);
3678                 }
3679                 bp->rx_agg_ring_size = agg_ring_size;
3680                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3681                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3682                 rx_space = rx_size + NET_SKB_PAD +
3683                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3684         }
3685
3686         bp->rx_buf_use_size = rx_size;
3687         bp->rx_buf_size = rx_space;
3688
3689         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3690         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3691
3692         ring_size = bp->tx_ring_size;
3693         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3694         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3695
3696         max_rx_cmpl = bp->rx_ring_size;
3697         /* bp->max_tpa must be added because TPA_START completions are
3698          * recycled immediately, so TPA completions are not bounded by
3699          * the RX ring size.
3700          */
3701         if (bp->flags & BNXT_FLAG_TPA)
3702                 max_rx_cmpl += bp->max_tpa;
3703         /* RX and TPA completions are 32-byte, all others are 16-byte */
3704         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3705         bp->cp_ring_size = ring_size;
3706
3707         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3708         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3709                 bp->cp_nr_pages = MAX_CP_PAGES;
3710                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3711                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3712                             ring_size, bp->cp_ring_size);
3713         }
3714         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3715         bp->cp_ring_mask = bp->cp_bit - 1;
3716 }
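
/* Worked example (illustrative; assumes a 1500-byte MTU, 64-byte cache
 * lines, no TPA and no jumbo frames): rx_size = SKB_DATA_ALIGN(1500 +
 * ETH_HLEN + NET_IP_ALIGN + 8) = 1536, and rx_space adds NET_SKB_PAD
 * plus the aligned skb_shared_info, still well under one page, so no
 * aggregation ring is created.  The completion ring then reserves two
 * 16-byte entries per possible RX/TPA completion plus one per TX
 * descriptor, as computed above.
 */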
3717
3718 /* Changing allocation mode of RX rings.
3719  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3720  */
3721 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3722 {
3723         if (page_mode) {
3724                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3725                         return -EOPNOTSUPP;
3726                 bp->dev->max_mtu =
3727                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3728                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3729                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3730                 bp->rx_dir = DMA_BIDIRECTIONAL;
3731                 bp->rx_skb_func = bnxt_rx_page_skb;
3732                 /* Disable LRO or GRO_HW */
3733                 netdev_update_features(bp->dev);
3734         } else {
3735                 bp->dev->max_mtu = bp->max_mtu;
3736                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3737                 bp->rx_dir = DMA_FROM_DEVICE;
3738                 bp->rx_skb_func = bnxt_rx_skb;
3739         }
3740         return 0;
3741 }
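
/* Page mode is the configuration XDP requires: one page per packet
 * with XDP_PACKET_HEADROOM in front, DMA_BIDIRECTIONAL mappings so a
 * BPF program may rewrite the frame before XDP_TX, and an MTU cap
 * (BNXT_MAX_PAGE_MODE_MTU) that guarantees headroom plus frame fit in
 * that single page.
 */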
3742
3743 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3744 {
3745         int i;
3746         struct bnxt_vnic_info *vnic;
3747         struct pci_dev *pdev = bp->pdev;
3748
3749         if (!bp->vnic_info)
3750                 return;
3751
3752         for (i = 0; i < bp->nr_vnics; i++) {
3753                 vnic = &bp->vnic_info[i];
3754
3755                 kfree(vnic->fw_grp_ids);
3756                 vnic->fw_grp_ids = NULL;
3757
3758                 kfree(vnic->uc_list);
3759                 vnic->uc_list = NULL;
3760
3761                 if (vnic->mc_list) {
3762                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3763                                           vnic->mc_list, vnic->mc_list_mapping);
3764                         vnic->mc_list = NULL;
3765                 }
3766
3767                 if (vnic->rss_table) {
3768                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3769                                           vnic->rss_table,
3770                                           vnic->rss_table_dma_addr);
3771                         vnic->rss_table = NULL;
3772                 }
3773
3774                 vnic->rss_hash_key = NULL;
3775                 vnic->flags = 0;
3776         }
3777 }
3778
3779 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3780 {
3781         int i, rc = 0, size;
3782         struct bnxt_vnic_info *vnic;
3783         struct pci_dev *pdev = bp->pdev;
3784         int max_rings;
3785
3786         for (i = 0; i < bp->nr_vnics; i++) {
3787                 vnic = &bp->vnic_info[i];
3788
3789                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3790                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3791
3792                         if (mem_size > 0) {
3793                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3794                                 if (!vnic->uc_list) {
3795                                         rc = -ENOMEM;
3796                                         goto out;
3797                                 }
3798                         }
3799                 }
3800
3801                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3802                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3803                         vnic->mc_list =
3804                                 dma_alloc_coherent(&pdev->dev,
3805                                                    vnic->mc_list_size,
3806                                                    &vnic->mc_list_mapping,
3807                                                    GFP_KERNEL);
3808                         if (!vnic->mc_list) {
3809                                 rc = -ENOMEM;
3810                                 goto out;
3811                         }
3812                 }
3813
3814                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3815                         goto vnic_skip_grps;
3816
3817                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3818                         max_rings = bp->rx_nr_rings;
3819                 else
3820                         max_rings = 1;
3821
3822                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3823                 if (!vnic->fw_grp_ids) {
3824                         rc = -ENOMEM;
3825                         goto out;
3826                 }
3827 vnic_skip_grps:
3828                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3829                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3830                         continue;
3831
3832                 /* Allocate rss table and hash key */
3833                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3834                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3835                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3836
3837                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3838                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3839                                                      vnic->rss_table_size,
3840                                                      &vnic->rss_table_dma_addr,
3841                                                      GFP_KERNEL);
3842                 if (!vnic->rss_table) {
3843                         rc = -ENOMEM;
3844                         goto out;
3845                 }
3846
3847                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3848                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3849         }
3850         return 0;
3851
3852 out:
3853         return rc;
3854 }
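
/* Note: the RSS hash key is carved from the tail of the rss_table
 * allocation above (vnic->rss_table + size), so a single coherent
 * buffer backs both the indirection table and the hash key.
 */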
3855
3856 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3857 {
3858         struct pci_dev *pdev = bp->pdev;
3859
3860         if (bp->hwrm_cmd_resp_addr) {
3861                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3862                                   bp->hwrm_cmd_resp_dma_addr);
3863                 bp->hwrm_cmd_resp_addr = NULL;
3864         }
3865
3866         if (bp->hwrm_cmd_kong_resp_addr) {
3867                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3868                                   bp->hwrm_cmd_kong_resp_addr,
3869                                   bp->hwrm_cmd_kong_resp_dma_addr);
3870                 bp->hwrm_cmd_kong_resp_addr = NULL;
3871         }
3872 }
3873
3874 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3875 {
3876         struct pci_dev *pdev = bp->pdev;
3877
3878         if (bp->hwrm_cmd_kong_resp_addr)
3879                 return 0;
3880
3881         bp->hwrm_cmd_kong_resp_addr =
3882                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3883                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3884                                    GFP_KERNEL);
3885         if (!bp->hwrm_cmd_kong_resp_addr)
3886                 return -ENOMEM;
3887
3888         return 0;
3889 }
3890
3891 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3892 {
3893         struct pci_dev *pdev = bp->pdev;
3894
3895         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3896                                                    &bp->hwrm_cmd_resp_dma_addr,
3897                                                    GFP_KERNEL);
3898         if (!bp->hwrm_cmd_resp_addr)
3899                 return -ENOMEM;
3900
3901         return 0;
3902 }
3903
3904 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3905 {
3906         if (bp->hwrm_short_cmd_req_addr) {
3907                 struct pci_dev *pdev = bp->pdev;
3908
3909                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3910                                   bp->hwrm_short_cmd_req_addr,
3911                                   bp->hwrm_short_cmd_req_dma_addr);
3912                 bp->hwrm_short_cmd_req_addr = NULL;
3913         }
3914 }
3915
3916 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3917 {
3918         struct pci_dev *pdev = bp->pdev;
3919
3920         if (bp->hwrm_short_cmd_req_addr)
3921                 return 0;
3922
3923         bp->hwrm_short_cmd_req_addr =
3924                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3925                                    &bp->hwrm_short_cmd_req_dma_addr,
3926                                    GFP_KERNEL);
3927         if (!bp->hwrm_short_cmd_req_addr)
3928                 return -ENOMEM;
3929
3930         return 0;
3931 }
3932
3933 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3934 {
3935         kfree(stats->hw_masks);
3936         stats->hw_masks = NULL;
3937         kfree(stats->sw_stats);
3938         stats->sw_stats = NULL;
3939         if (stats->hw_stats) {
3940                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3941                                   stats->hw_stats_map);
3942                 stats->hw_stats = NULL;
3943         }
3944 }
3945
3946 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3947                                 bool alloc_masks)
3948 {
3949         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3950                                              &stats->hw_stats_map, GFP_KERNEL);
3951         if (!stats->hw_stats)
3952                 return -ENOMEM;
3953
3954         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3955         if (!stats->sw_stats)
3956                 goto stats_mem_err;
3957
3958         if (alloc_masks) {
3959                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3960                 if (!stats->hw_masks)
3961                         goto stats_mem_err;
3962         }
3963         return 0;
3964
3965 stats_mem_err:
3966         bnxt_free_stats_mem(bp, stats);
3967         return -ENOMEM;
3968 }
3969
3970 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3971 {
3972         int i;
3973
3974         for (i = 0; i < count; i++)
3975                 mask_arr[i] = mask;
3976 }
3977
3978 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3979 {
3980         int i;
3981
3982         for (i = 0; i < count; i++)
3983                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3984 }
3985
3986 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3987                                     struct bnxt_stats_mem *stats)
3988 {
3989         struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3990         struct hwrm_func_qstats_ext_input req = {0};
3991         __le64 *hw_masks;
3992         int rc;
3993
3994         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3995             !(bp->flags & BNXT_FLAG_CHIP_P5))
3996                 return -EOPNOTSUPP;
3997
3998         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3999         req.fid = cpu_to_le16(0xffff);
4000         req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4001         mutex_lock(&bp->hwrm_cmd_lock);
4002         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4003         if (rc)
4004                 goto qstat_exit;
4005
4006         hw_masks = &resp->rx_ucast_pkts;
4007         bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4008
4009 qstat_exit:
4010         mutex_unlock(&bp->hwrm_cmd_lock);
4011         return rc;
4012 }
4013
4014 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4015 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4016
4017 static void bnxt_init_stats(struct bnxt *bp)
4018 {
4019         struct bnxt_napi *bnapi = bp->bnapi[0];
4020         struct bnxt_cp_ring_info *cpr;
4021         struct bnxt_stats_mem *stats;
4022         __le64 *rx_stats, *tx_stats;
4023         int rc, rx_count, tx_count;
4024         u64 *rx_masks, *tx_masks;
4025         u64 mask;
4026         u8 flags;
4027
4028         cpr = &bnapi->cp_ring;
4029         stats = &cpr->stats;
4030         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4031         if (rc) {
4032                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4033                         mask = (1ULL << 48) - 1;
4034                 else
4035                         mask = -1ULL;
4036                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4037         }
4038         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4039                 stats = &bp->port_stats;
4040                 rx_stats = stats->hw_stats;
4041                 rx_masks = stats->hw_masks;
4042                 rx_count = sizeof(struct rx_port_stats) / 8;
4043                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4044                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4045                 tx_count = sizeof(struct tx_port_stats) / 8;
4046
4047                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4048                 rc = bnxt_hwrm_port_qstats(bp, flags);
4049                 if (rc) {
4050                         mask = (1ULL << 40) - 1;
4051
4052                         bnxt_fill_masks(rx_masks, mask, rx_count);
4053                         bnxt_fill_masks(tx_masks, mask, tx_count);
4054                 } else {
4055                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4056                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4057                         bnxt_hwrm_port_qstats(bp, 0);
4058                 }
4059         }
4060         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4061                 stats = &bp->rx_port_stats_ext;
4062                 rx_stats = stats->hw_stats;
4063                 rx_masks = stats->hw_masks;
4064                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4065                 stats = &bp->tx_port_stats_ext;
4066                 tx_stats = stats->hw_stats;
4067                 tx_masks = stats->hw_masks;
4068                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4069
4070                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4071                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4072                 if (rc) {
4073                         mask = (1ULL << 40) - 1;
4074
4075                         bnxt_fill_masks(rx_masks, mask, rx_count);
4076                         if (tx_stats)
4077                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4078                 } else {
4079                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4080                         if (tx_stats)
4081                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4082                                                    tx_count);
4083                         bnxt_hwrm_port_qstats_ext(bp, 0);
4084                 }
4085         }
4086 }
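
/* The masks gathered above describe the width of each hardware
 * counter.  When the firmware cannot report them via the COUNTER_MASK
 * queries, the code falls back to assuming 48-bit ring counters on P5
 * chips (full 64-bit otherwise) and 40-bit port counters, so that
 * later reads can detect and accumulate counter rollover correctly.
 */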
4087
4088 static void bnxt_free_port_stats(struct bnxt *bp)
4089 {
4090         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4091         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4092
4093         bnxt_free_stats_mem(bp, &bp->port_stats);
4094         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4095         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4096 }
4097
4098 static void bnxt_free_ring_stats(struct bnxt *bp)
4099 {
4100         int i;
4101
4102         if (!bp->bnapi)
4103                 return;
4104
4105         for (i = 0; i < bp->cp_nr_rings; i++) {
4106                 struct bnxt_napi *bnapi = bp->bnapi[i];
4107                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4108
4109                 bnxt_free_stats_mem(bp, &cpr->stats);
4110         }
4111 }
4112
4113 static int bnxt_alloc_stats(struct bnxt *bp)
4114 {
4115         u32 size, i;
4116         int rc;
4117
4118         size = bp->hw_ring_stats_size;
4119
4120         for (i = 0; i < bp->cp_nr_rings; i++) {
4121                 struct bnxt_napi *bnapi = bp->bnapi[i];
4122                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4123
4124                 cpr->stats.len = size;
4125                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4126                 if (rc)
4127                         return rc;
4128
4129                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4130         }
4131
4132         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4133                 return 0;
4134
4135         if (bp->port_stats.hw_stats)
4136                 goto alloc_ext_stats;
4137
4138         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4139         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4140         if (rc)
4141                 return rc;
4142
4143         bp->flags |= BNXT_FLAG_PORT_STATS;
4144
4145 alloc_ext_stats:
4146         /* Display extended statistics only if FW supports them */
4147         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4148                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4149                         return 0;
4150
4151         if (bp->rx_port_stats_ext.hw_stats)
4152                 goto alloc_tx_ext_stats;
4153
4154         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4155         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4156         /* Extended stats are optional */
4157         if (rc)
4158                 return 0;
4159
4160 alloc_tx_ext_stats:
4161         if (bp->tx_port_stats_ext.hw_stats)
4162                 return 0;
4163
4164         if (bp->hwrm_spec_code >= 0x10902 ||
4165             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4166                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4167                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4168                 /* Extended stats are optional */
4169                 if (rc)
4170                         return 0;
4171         }
4172         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4173         return 0;
4174 }
4175
4176 static void bnxt_clear_ring_indices(struct bnxt *bp)
4177 {
4178         int i;
4179
4180         if (!bp->bnapi)
4181                 return;
4182
4183         for (i = 0; i < bp->cp_nr_rings; i++) {
4184                 struct bnxt_napi *bnapi = bp->bnapi[i];
4185                 struct bnxt_cp_ring_info *cpr;
4186                 struct bnxt_rx_ring_info *rxr;
4187                 struct bnxt_tx_ring_info *txr;
4188
4189                 if (!bnapi)
4190                         continue;
4191
4192                 cpr = &bnapi->cp_ring;
4193                 cpr->cp_raw_cons = 0;
4194
4195                 txr = bnapi->tx_ring;
4196                 if (txr) {
4197                         txr->tx_prod = 0;
4198                         txr->tx_cons = 0;
4199                 }
4200
4201                 rxr = bnapi->rx_ring;
4202                 if (rxr) {
4203                         rxr->rx_prod = 0;
4204                         rxr->rx_agg_prod = 0;
4205                         rxr->rx_sw_agg_prod = 0;
4206                         rxr->rx_next_cons = 0;
4207                 }
4208         }
4209 }
4210
4211 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4212 {
4213 #ifdef CONFIG_RFS_ACCEL
4214         int i;
4215
4216         /* We are under rtnl_lock and all our NAPIs have been disabled.
4217          * It's safe to delete the hash table.
4218          */
4219         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4220                 struct hlist_head *head;
4221                 struct hlist_node *tmp;
4222                 struct bnxt_ntuple_filter *fltr;
4223
4224                 head = &bp->ntp_fltr_hash_tbl[i];
4225                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4226                         hlist_del(&fltr->hash);
4227                         kfree(fltr);
4228                 }
4229         }
4230         if (irq_reinit) {
4231                 kfree(bp->ntp_fltr_bmap);
4232                 bp->ntp_fltr_bmap = NULL;
4233         }
4234         bp->ntp_fltr_count = 0;
4235 #endif
4236 }
4237
4238 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4239 {
4240 #ifdef CONFIG_RFS_ACCEL
4241         int i, rc = 0;
4242
4243         if (!(bp->flags & BNXT_FLAG_RFS))
4244                 return 0;
4245
4246         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4247                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4248
4249         bp->ntp_fltr_count = 0;
4250         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4251                                     sizeof(long),
4252                                     GFP_KERNEL);
4253
4254         if (!bp->ntp_fltr_bmap)
4255                 rc = -ENOMEM;
4256
4257         return rc;
4258 #else
4259         return 0;
4260 #endif
4261 }
4262
4263 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4264 {
4265         bnxt_free_vnic_attributes(bp);
4266         bnxt_free_tx_rings(bp);
4267         bnxt_free_rx_rings(bp);
4268         bnxt_free_cp_rings(bp);
4269         bnxt_free_ntp_fltrs(bp, irq_re_init);
4270         if (irq_re_init) {
4271                 bnxt_free_ring_stats(bp);
4272                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4273                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4274                         bnxt_free_port_stats(bp);
4275                 bnxt_free_ring_grps(bp);
4276                 bnxt_free_vnics(bp);
4277                 kfree(bp->tx_ring_map);
4278                 bp->tx_ring_map = NULL;
4279                 kfree(bp->tx_ring);
4280                 bp->tx_ring = NULL;
4281                 kfree(bp->rx_ring);
4282                 bp->rx_ring = NULL;
4283                 kfree(bp->bnapi);
4284                 bp->bnapi = NULL;
4285         } else {
4286                 bnxt_clear_ring_indices(bp);
4287         }
4288 }
4289
4290 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4291 {
4292         int i, j, rc, size, arr_size;
4293         void *bnapi;
4294
4295         if (irq_re_init) {
4296                 /* Allocate the bnapi pointer array and all the bnxt_napi
4297                  * structs for the queues in one contiguous block
4298                  */
4299                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4300                                 bp->cp_nr_rings);
4301                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4302                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4303                 if (!bnapi)
4304                         return -ENOMEM;
4305
4306                 bp->bnapi = bnapi;
4307                 bnapi += arr_size;
4308                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4309                         bp->bnapi[i] = bnapi;
4310                         bp->bnapi[i]->index = i;
4311                         bp->bnapi[i]->bp = bp;
4312                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4313                                 struct bnxt_cp_ring_info *cpr =
4314                                         &bp->bnapi[i]->cp_ring;
4315
4316                                 cpr->cp_ring_struct.ring_mem.flags =
4317                                         BNXT_RMEM_RING_PTE_FLAG;
4318                         }
4319                 }
4320
4321                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4322                                       sizeof(struct bnxt_rx_ring_info),
4323                                       GFP_KERNEL);
4324                 if (!bp->rx_ring)
4325                         return -ENOMEM;
4326
4327                 for (i = 0; i < bp->rx_nr_rings; i++) {
4328                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4329
4330                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4331                                 rxr->rx_ring_struct.ring_mem.flags =
4332                                         BNXT_RMEM_RING_PTE_FLAG;
4333                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4334                                         BNXT_RMEM_RING_PTE_FLAG;
4335                         }
4336                         rxr->bnapi = bp->bnapi[i];
4337                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4338                 }
4339
4340                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4341                                       sizeof(struct bnxt_tx_ring_info),
4342                                       GFP_KERNEL);
4343                 if (!bp->tx_ring)
4344                         return -ENOMEM;
4345
4346                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4347                                           GFP_KERNEL);
4348
4349                 if (!bp->tx_ring_map)
4350                         return -ENOMEM;
4351
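                /* With shared completion rings, TX ring i shares bnapi i
                 * with RX ring i; otherwise the TX rings take the bnapi
                 * instances that follow the RX-only ones.
                 */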
4352                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4353                         j = 0;
4354                 else
4355                         j = bp->rx_nr_rings;
4356
4357                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4358                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4359
4360                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4361                                 txr->tx_ring_struct.ring_mem.flags =
4362                                         BNXT_RMEM_RING_PTE_FLAG;
4363                         txr->bnapi = bp->bnapi[j];
4364                         bp->bnapi[j]->tx_ring = txr;
4365                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4366                         if (i >= bp->tx_nr_rings_xdp) {
4367                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4368                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4369                         } else {
4370                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4371                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4372                         }
4373                 }
4374
4375                 rc = bnxt_alloc_stats(bp);
4376                 if (rc)
4377                         goto alloc_mem_err;
4378                 bnxt_init_stats(bp);
4379
4380                 rc = bnxt_alloc_ntp_fltrs(bp);
4381                 if (rc)
4382                         goto alloc_mem_err;
4383
4384                 rc = bnxt_alloc_vnics(bp);
4385                 if (rc)
4386                         goto alloc_mem_err;
4387         }
4388
4389         bnxt_init_ring_struct(bp);
4390
4391         rc = bnxt_alloc_rx_rings(bp);
4392         if (rc)
4393                 goto alloc_mem_err;
4394
4395         rc = bnxt_alloc_tx_rings(bp);
4396         if (rc)
4397                 goto alloc_mem_err;
4398
4399         rc = bnxt_alloc_cp_rings(bp);
4400         if (rc)
4401                 goto alloc_mem_err;
4402
4403         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4404                                   BNXT_VNIC_UCAST_FLAG;
4405         rc = bnxt_alloc_vnic_attributes(bp);
4406         if (rc)
4407                 goto alloc_mem_err;
4408         return 0;
4409
4410 alloc_mem_err:
4411         bnxt_free_mem(bp, true);
4412         return rc;
4413 }
4414
4415 static void bnxt_disable_int(struct bnxt *bp)
4416 {
4417         int i;
4418
4419         if (!bp->bnapi)
4420                 return;
4421
4422         for (i = 0; i < bp->cp_nr_rings; i++) {
4423                 struct bnxt_napi *bnapi = bp->bnapi[i];
4424                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4425                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4426
4427                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4428                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4429         }
4430 }
4431
4432 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4433 {
4434         struct bnxt_napi *bnapi = bp->bnapi[n];
4435         struct bnxt_cp_ring_info *cpr;
4436
4437         cpr = &bnapi->cp_ring;
4438         return cpr->cp_ring_struct.map_idx;
4439 }
4440
4441 static void bnxt_disable_int_sync(struct bnxt *bp)
4442 {
4443         int i;
4444
4445         if (!bp->irq_tbl)
4446                 return;
4447
4448         atomic_inc(&bp->intr_sem);
4449
4450         bnxt_disable_int(bp);
4451         for (i = 0; i < bp->cp_nr_rings; i++) {
4452                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4453
4454                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4455         }
4456 }
4457
4458 static void bnxt_enable_int(struct bnxt *bp)
4459 {
4460         int i;
4461
4462         atomic_set(&bp->intr_sem, 0);
4463         for (i = 0; i < bp->cp_nr_rings; i++) {
4464                 struct bnxt_napi *bnapi = bp->bnapi[i];
4465                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4466
4467                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4468         }
4469 }
4470
4471 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4472                             u16 cmpl_ring, u16 target_id)
4473 {
4474         struct input *req = request;
4475
4476         req->req_type = cpu_to_le16(req_type);
4477         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4478         req->target_id = cpu_to_le16(target_id);
4479         if (bnxt_kong_hwrm_message(bp, req))
4480                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4481         else
4482                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4483 }
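
/* Typical HWRM usage, as seen throughout this file (cmpl_ring and
 * target_id are -1, i.e. unused, for most commands):
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */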
4484
4485 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4486 {
4487         switch (hwrm_err) {
4488         case HWRM_ERR_CODE_SUCCESS:
4489                 return 0;
4490         case HWRM_ERR_CODE_RESOURCE_LOCKED:
4491                 return -EROFS;
4492         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4493                 return -EACCES;
4494         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4495                 return -ENOSPC;
4496         case HWRM_ERR_CODE_INVALID_PARAMS:
4497         case HWRM_ERR_CODE_INVALID_FLAGS:
4498         case HWRM_ERR_CODE_INVALID_ENABLES:
4499         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4500         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4501                 return -EINVAL;
4502         case HWRM_ERR_CODE_NO_BUFFER:
4503                 return -ENOMEM;
4504         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4505         case HWRM_ERR_CODE_BUSY:
4506                 return -EAGAIN;
4507         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4508                 return -EOPNOTSUPP;
4509         default:
4510                 return -EIO;
4511         }
4512 }
4513
4514 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4515                                  int timeout, bool silent)
4516 {
4517         int i, intr_process, rc, tmo_count;
4518         struct input *req = msg;
4519         u32 *data = msg;
4520         u8 *valid;
4521         u16 cp_ring_id, len = 0;
4522         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4523         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4524         struct hwrm_short_input short_input = {0};
4525         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4526         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4527         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4528
4529         if (BNXT_NO_FW_ACCESS(bp) &&
4530             le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4531                 return -EBUSY;
4532
4533         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4534                 if (msg_len > bp->hwrm_max_ext_req_len ||
4535                     !bp->hwrm_short_cmd_req_addr)
4536                         return -EINVAL;
4537         }
4538
4539         if (bnxt_hwrm_kong_chnl(bp, req)) {
4540                 dst = BNXT_HWRM_CHNL_KONG;
4541                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4542                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4543                 resp = bp->hwrm_cmd_kong_resp_addr;
4544         }
4545
4546         memset(resp, 0, PAGE_SIZE);
4547         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4548         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4549
4550         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4551         /* currently supports only one outstanding message */
4552         if (intr_process)
4553                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4554
4555         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4556             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4557                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4558                 u16 max_msg_len;
4559
4560                 /* Set the boundary for the maximum extended request
4561                  * length for the short cmd format.  If passed up from the
4562                  * device, use the max supported internal req length.
4563                  */
4564                 max_msg_len = bp->hwrm_max_ext_req_len;
4565
4566                 memcpy(short_cmd_req, req, msg_len);
4567                 if (msg_len < max_msg_len)
4568                         memset(short_cmd_req + msg_len, 0,
4569                                max_msg_len - msg_len);
4570
4571                 short_input.req_type = req->req_type;
4572                 short_input.signature =
4573                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4574                 short_input.size = cpu_to_le16(msg_len);
4575                 short_input.req_addr =
4576                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4577
4578                 data = (u32 *)&short_input;
4579                 msg_len = sizeof(short_input);
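                /* From here on, only this short header is written to the
                 * BAR; the device DMAs the full request from the short
                 * cmd buffer set up above.
                 */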
4580
4581                 /* Sync memory write before updating doorbell */
4582                 wmb();
4583
4584                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4585         }
4586
4587         /* Write request msg to hwrm channel */
4588         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4589
4590         for (i = msg_len; i < max_req_len; i += 4)
4591                 writel(0, bp->bar0 + bar_offset + i);
4592
4593         /* Ring channel doorbell */
4594         writel(1, bp->bar0 + doorbell_offset);
4595
4596         if (!pci_is_enabled(bp->pdev))
4597                 return -ENODEV;
4598
4599         if (!timeout)
4600                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4601         /* Cap the timeout at the maximum allowed value */
4602         timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
4603         /* convert timeout to usec */
4604         timeout *= 1000;
4605
4606         i = 0;
4607         /* Short timeout for the first few iterations:
4608          * number of loops = number of loops for short timeout +
4609          * number of loops for standard timeout.
4610          */
4611         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4612         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4613         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
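        /* For example, assuming the default 500 msec timeout and the
         * 3 usec / 25 usec minimum sleep constants, this works out to
         * roughly 5 short + 20000 standard polling loops.
         */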
4614
4615         if (intr_process) {
4616                 u16 seq_id = bp->hwrm_intr_seq_id;
4617
4618                 /* Wait until hwrm response cmpl interrupt is processed */
4619                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4620                        i++ < tmo_count) {
4621                         /* Abort the wait for completion if the FW health
4622                          * check has failed.
4623                          */
4624                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4625                                 return -EBUSY;
4626                         /* on first few passes, just barely sleep */
4627                         if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4628                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4629                                              HWRM_SHORT_MAX_TIMEOUT);
4630                         } else {
4631                                 if (HWRM_WAIT_MUST_ABORT(bp, req))
4632                                         break;
4633                                 usleep_range(HWRM_MIN_TIMEOUT,
4634                                              HWRM_MAX_TIMEOUT);
4635                         }
4636                 }
4637
4638                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4639                         if (!silent)
4640                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4641                                            le16_to_cpu(req->req_type));
4642                         return -EBUSY;
4643                 }
4644                 len = le16_to_cpu(resp->resp_len);
4645                 valid = ((u8 *)resp) + len - 1;
4646         } else {
4647                 int j;
4648
4649                 /* Check if response len is updated */
4650                 for (i = 0; i < tmo_count; i++) {
4651                         /* Abort the wait for completion if the FW health
4652                          * check has failed.
4653                          */
4654                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4655                                 return -EBUSY;
4656                         len = le16_to_cpu(resp->resp_len);
4657                         if (len)
4658                                 break;
4659                         /* on first few passes, just barely sleep */
4660                         if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4661                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4662                                              HWRM_SHORT_MAX_TIMEOUT);
4663                         } else {
4664                                 if (HWRM_WAIT_MUST_ABORT(bp, req))
4665                                         goto timeout_abort;
4666                                 usleep_range(HWRM_MIN_TIMEOUT,
4667                                              HWRM_MAX_TIMEOUT);
4668                         }
4669                 }
4670
4671                 if (i >= tmo_count) {
4672 timeout_abort:
4673                         if (!silent)
4674                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4675                                            HWRM_TOTAL_TIMEOUT(i),
4676                                            le16_to_cpu(req->req_type),
4677                                            le16_to_cpu(req->seq_id), len);
4678                         return -EBUSY;
4679                 }
4680
4681                 /* Last byte of resp contains valid bit */
4682                 valid = ((u8 *)resp) + len - 1;
4683                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4684                         /* make sure we read from updated DMA memory */
4685                         dma_rmb();
4686                         if (*valid)
4687                                 break;
4688                         usleep_range(1, 5);
4689                 }
4690
4691                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4692                         if (!silent)
4693                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4694                                            HWRM_TOTAL_TIMEOUT(i),
4695                                            le16_to_cpu(req->req_type),
4696                                            le16_to_cpu(req->seq_id), len,
4697                                            *valid);
4698                         return -EBUSY;
4699                 }
4700         }
4701
4702         /* Zero the valid bit for compatibility.  The valid bit in an older
4703          * spec may become a new field in a newer spec.  We must make sure
4704          * that a new field not implemented by the old spec reads as zero.
4705          */
4706         *valid = 0;
4707         rc = le16_to_cpu(resp->error_code);
4708         if (rc && !silent)
4709                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4710                            le16_to_cpu(resp->req_type),
4711                            le16_to_cpu(resp->seq_id), rc);
4712         return bnxt_hwrm_to_stderr(rc);
4713 }
4714
4715 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4716 {
4717         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4718 }
4719
4720 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4721                               int timeout)
4722 {
4723         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4724 }
4725
4726 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4727 {
4728         int rc;
4729
4730         mutex_lock(&bp->hwrm_cmd_lock);
4731         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4732         mutex_unlock(&bp->hwrm_cmd_lock);
4733         return rc;
4734 }
4735
4736 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4737                              int timeout)
4738 {
4739         int rc;
4740
4741         mutex_lock(&bp->hwrm_cmd_lock);
4742         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4743         mutex_unlock(&bp->hwrm_cmd_lock);
4744         return rc;
4745 }
4746
4747 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4748                             bool async_only)
4749 {
4750         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4751         struct hwrm_func_drv_rgtr_input req = {0};
4752         DECLARE_BITMAP(async_events_bmap, 256);
4753         u32 *events = (u32 *)async_events_bmap;
4754         u32 flags;
4755         int rc, i;
4756
4757         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4758
4759         req.enables =
4760                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4761                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4762                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4763
4764         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4765         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4766         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4767                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4768         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4769                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4770                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4771         req.flags = cpu_to_le32(flags);
4772         req.ver_maj_8b = DRV_VER_MAJ;
4773         req.ver_min_8b = DRV_VER_MIN;
4774         req.ver_upd_8b = DRV_VER_UPD;
4775         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4776         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4777         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4778
4779         if (BNXT_PF(bp)) {
4780                 u32 data[8];
4781                 int i;
4782
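                /* Build a 256-bit (8 x u32) bitmap of the HWRM commands in
                 * bnxt_vf_req_snif that the firmware should forward from
                 * VFs to the PF driver.
                 */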
4783                 memset(data, 0, sizeof(data));
4784                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4785                         u16 cmd = bnxt_vf_req_snif[i];
4786                         unsigned int bit, idx;
4787
4788                         idx = cmd / 32;
4789                         bit = cmd % 32;
4790                         data[idx] |= 1 << bit;
4791                 }
4792
4793                 for (i = 0; i < 8; i++)
4794                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4795
4796                 req.enables |=
4797                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4798         }
4799
4800         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4801                 req.flags |= cpu_to_le32(
4802                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4803
4804         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4805         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4806                 u16 event_id = bnxt_async_events_arr[i];
4807
4808                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4809                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4810                         continue;
4811                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4812         }
4813         if (bmap && bmap_size) {
4814                 for (i = 0; i < bmap_size; i++) {
4815                         if (test_bit(i, bmap))
4816                                 __set_bit(i, async_events_bmap);
4817                 }
4818         }
4819         for (i = 0; i < 8; i++)
4820                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4821
4822         if (async_only)
4823                 req.enables =
4824                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4825
4826         mutex_lock(&bp->hwrm_cmd_lock);
4827         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4828         if (!rc) {
4829                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4830                 if (resp->flags &
4831                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4832                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4833         }
4834         mutex_unlock(&bp->hwrm_cmd_lock);
4835         return rc;
4836 }
4837
4838 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4839 {
4840         struct hwrm_func_drv_unrgtr_input req = {0};
4841
4842         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4843                 return 0;
4844
4845         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4846         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4847 }
4848
4849 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4850 {
4851         int rc;
4852         struct hwrm_tunnel_dst_port_free_input req = {0};
4853
4854         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4855         req.tunnel_type = tunnel_type;
4856
4857         switch (tunnel_type) {
4858         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4859                 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4860                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4861                 break;
4862         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4863                 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4864                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4865                 break;
4866         default:
4867                 break;
4868         }
4869
4870         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4871         if (rc)
4872                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4873                            rc);
4874         return rc;
4875 }
4876
4877 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4878                                            u8 tunnel_type)
4879 {
4880         int rc;
4881         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4882         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4883
4884         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4885
4886         req.tunnel_type = tunnel_type;
4887         req.tunnel_dst_port_val = port;
4888
4889         mutex_lock(&bp->hwrm_cmd_lock);
4890         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4891         if (rc) {
4892                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4893                            rc);
4894                 goto err_out;
4895         }
4896
4897         switch (tunnel_type) {
4898         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4899                 bp->vxlan_fw_dst_port_id =
4900                         le16_to_cpu(resp->tunnel_dst_port_id);
4901                 break;
4902         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4903                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4904                 break;
4905         default:
4906                 break;
4907         }
4908
4909 err_out:
4910         mutex_unlock(&bp->hwrm_cmd_lock);
4911         return rc;
4912 }
4913
4914 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4915 {
4916         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4917         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4918
4919         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4920         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4921
4922         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4923         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4924         req.mask = cpu_to_le32(vnic->rx_mask);
4925         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4926 }
4927
4928 #ifdef CONFIG_RFS_ACCEL
4929 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4930                                             struct bnxt_ntuple_filter *fltr)
4931 {
4932         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4933
4934         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4935         req.ntuple_filter_id = fltr->filter_id;
4936         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4937 }
4938
4939 #define BNXT_NTP_FLTR_FLAGS                                     \
4940         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4941          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4942          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4943          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4944          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4945          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4946          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4947          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4948          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4949          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4950          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4951          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4952          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4953          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4954
4955 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4956                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4957
4958 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4959                                              struct bnxt_ntuple_filter *fltr)
4960 {
4961         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4962         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4963         struct flow_keys *keys = &fltr->fkeys;
4964         struct bnxt_vnic_info *vnic;
4965         u32 flags = 0;
4966         int rc = 0;
4967
4968         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4969         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4970
4971         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4972                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4973                 req.dst_id = cpu_to_le16(fltr->rxq);
4974         } else {
4975                 vnic = &bp->vnic_info[fltr->rxq + 1];
4976                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4977         }
4978         req.flags = cpu_to_le32(flags);
4979         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4980
4981         req.ethertype = htons(ETH_P_IP);
4982         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4983         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4984         req.ip_protocol = keys->basic.ip_proto;
4985
4986         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4987                 int i;
4988
4989                 req.ethertype = htons(ETH_P_IPV6);
4990                 req.ip_addr_type =
4991                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4992                 *(struct in6_addr *)&req.src_ipaddr[0] =
4993                         keys->addrs.v6addrs.src;
4994                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4995                         keys->addrs.v6addrs.dst;
4996                 for (i = 0; i < 4; i++) {
4997                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4998                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4999                 }
5000         } else {
5001                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
5002                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5003                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5004                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5005         }
5006         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5007                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5008                 req.tunnel_type =
5009                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5010         }
5011
5012         req.src_port = keys->ports.src;
5013         req.src_port_mask = cpu_to_be16(0xffff);
5014         req.dst_port = keys->ports.dst;
5015         req.dst_port_mask = cpu_to_be16(0xffff);
5016
5017         mutex_lock(&bp->hwrm_cmd_lock);
5018         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5019         if (!rc) {
5020                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
5021                 fltr->filter_id = resp->ntuple_filter_id;
5022         }
5023         mutex_unlock(&bp->hwrm_cmd_lock);
5024         return rc;
5025 }
5026 #endif
5027
5028 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5029                                      u8 *mac_addr)
5030 {
5031         int rc;
5032         struct hwrm_cfa_l2_filter_alloc_input req = {0};
5033         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5034
5035         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
5036         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5037         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5038                 req.flags |=
5039                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5040         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5041         req.enables =
5042                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5043                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5044                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5045         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
5046         req.l2_addr_mask[0] = 0xff;
5047         req.l2_addr_mask[1] = 0xff;
5048         req.l2_addr_mask[2] = 0xff;
5049         req.l2_addr_mask[3] = 0xff;
5050         req.l2_addr_mask[4] = 0xff;
5051         req.l2_addr_mask[5] = 0xff;
5052
5053         mutex_lock(&bp->hwrm_cmd_lock);
5054         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5055         if (!rc)
5056                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5057                                                         resp->l2_filter_id;
5058         mutex_unlock(&bp->hwrm_cmd_lock);
5059         return rc;
5060 }
5061
5062 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5063 {
5064         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5065         int rc = 0;
5066
5067         /* Any associated ntuple filters will also be cleared by firmware. */
5068         mutex_lock(&bp->hwrm_cmd_lock);
5069         for (i = 0; i < num_of_vnics; i++) {
5070                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5071
5072                 for (j = 0; j < vnic->uc_filter_count; j++) {
5073                         struct hwrm_cfa_l2_filter_free_input req = {0};
5074
5075                         bnxt_hwrm_cmd_hdr_init(bp, &req,
5076                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
5077
5078                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
5079
5080                         rc = _hwrm_send_message(bp, &req, sizeof(req),
5081                                                 HWRM_CMD_TIMEOUT);
5082                 }
5083                 vnic->uc_filter_count = 0;
5084         }
5085         mutex_unlock(&bp->hwrm_cmd_lock);
5086
5087         return rc;
5088 }
5089
5090 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5091 {
5092         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5093         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5094         struct hwrm_vnic_tpa_cfg_input req = {0};
5095
5096         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5097                 return 0;
5098
5099         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5100
5101         if (tpa_flags) {
5102                 u16 mss = bp->dev->mtu - 40;
5103                 u32 nsegs, n, segs = 0, flags;
5104
5105                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5106                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5107                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5108                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5109                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5110                 if (tpa_flags & BNXT_FLAG_GRO)
5111                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5112
5113                 req.flags = cpu_to_le32(flags);
5114
5115                 req.enables =
5116                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5117                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5118                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5119
5120                 /* The number of aggregation segs is in log2 units, and the
5121                  * first packet is not counted in these units.
5122                  */
5123                 if (mss <= BNXT_RX_PAGE_SIZE) {
5124                         n = BNXT_RX_PAGE_SIZE / mss;
5125                         nsegs = (MAX_SKB_FRAGS - 1) * n;
5126                 } else {
5127                         n = mss / BNXT_RX_PAGE_SIZE;
5128                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
5129                                 n++;
5130                         nsegs = (MAX_SKB_FRAGS - n) / n;
5131                 }
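                /* Illustrative example: with a 1500-byte MTU (mss = 1460),
                 * 4K rx pages and MAX_SKB_FRAGS == 17, n = 2 and nsegs = 32,
                 * giving max_agg_segs = ilog2(32) = 5 on pre-P5 chips.
                 */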
5132
5133                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5134                         segs = MAX_TPA_SEGS_P5;
5135                         max_aggs = bp->max_tpa;
5136                 } else {
5137                         segs = ilog2(nsegs);
5138                 }
5139                 req.max_agg_segs = cpu_to_le16(segs);
5140                 req.max_aggs = cpu_to_le16(max_aggs);
5141
5142                 req.min_agg_len = cpu_to_le32(512);
5143         }
5144         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5145
5146         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5147 }
5148
5149 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5150 {
5151         struct bnxt_ring_grp_info *grp_info;
5152
5153         grp_info = &bp->grp_info[ring->grp_idx];
5154         return grp_info->cp_fw_ring_id;
5155 }
5156
5157 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5158 {
5159         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5160                 struct bnxt_napi *bnapi = rxr->bnapi;
5161                 struct bnxt_cp_ring_info *cpr;
5162
5163                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5164                 return cpr->cp_ring_struct.fw_ring_id;
5165         } else {
5166                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5167         }
5168 }
5169
5170 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5171 {
5172         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5173                 struct bnxt_napi *bnapi = txr->bnapi;
5174                 struct bnxt_cp_ring_info *cpr;
5175
5176                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5177                 return cpr->cp_ring_struct.fw_ring_id;
5178         } else {
5179                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5180         }
5181 }
5182
5183 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5184 {
5185         int entries;
5186
5187         if (bp->flags & BNXT_FLAG_CHIP_P5)
5188                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5189         else
5190                 entries = HW_HASH_INDEX_SIZE;
5191
5192         bp->rss_indir_tbl_entries = entries;
5193         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5194                                           GFP_KERNEL);
5195         if (!bp->rss_indir_tbl)
5196                 return -ENOMEM;
5197         return 0;
5198 }
5199
5200 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5201 {
5202         u16 max_rings, max_entries, pad, i;
5203
5204         if (!bp->rx_nr_rings)
5205                 return;
5206
5207         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5208                 max_rings = bp->rx_nr_rings - 1;
5209         else
5210                 max_rings = bp->rx_nr_rings;
5211
5212         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5213
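        /* ethtool_rxfh_indir_default() spreads the default entries
         * round-robin (i % max_rings) across the available RX rings.
         */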
5214         for (i = 0; i < max_entries; i++)
5215                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5216
5217         pad = bp->rss_indir_tbl_entries - max_entries;
5218         if (pad)
5219                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5220 }
5221
5222 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5223 {
5224         u16 i, tbl_size, max_ring = 0;
5225
5226         if (!bp->rss_indir_tbl)
5227                 return 0;
5228
5229         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5230         for (i = 0; i < tbl_size; i++)
5231                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5232         return max_ring;
5233 }
5234
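/* Number of RSS contexts needed: P5 chips use one context per group of
 * BNXT_RSS_TABLE_ENTRIES_P5 RX rings, Nitro A0 needs an extra context,
 * and all other chips use a single context.
 */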
5235 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5236 {
5237         if (bp->flags & BNXT_FLAG_CHIP_P5)
5238                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5239         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5240                 return 2;
5241         return 1;
5242 }
5243
5244 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5245 {
5246         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5247         u16 i, j;
5248
5249         /* Fill the RSS indirection table with ring group ids */
5250         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5251                 if (!no_rss)
5252                         j = bp->rss_indir_tbl[i];
5253                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5254         }
5255 }
5256
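/* On P5 chips, each indirection table entry is a pair: the RX ring's
 * fw_ring_id followed by its companion completion ring id, so two words
 * are written to the table per entry.
 */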
5257 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5258                                       struct bnxt_vnic_info *vnic)
5259 {
5260         __le16 *ring_tbl = vnic->rss_table;
5261         struct bnxt_rx_ring_info *rxr;
5262         u16 tbl_size, i;
5263
5264         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5265
5266         for (i = 0; i < tbl_size; i++) {
5267                 u16 ring_id, j;
5268
5269                 j = bp->rss_indir_tbl[i];
5270                 rxr = &bp->rx_ring[j];
5271
5272                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5273                 *ring_tbl++ = cpu_to_le16(ring_id);
5274                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5275                 *ring_tbl++ = cpu_to_le16(ring_id);
5276         }
5277 }
5278
5279 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5280 {
5281         if (bp->flags & BNXT_FLAG_CHIP_P5)
5282                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5283         else
5284                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5285 }
5286
5287 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5288 {
5289         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5290         struct hwrm_vnic_rss_cfg_input req = {0};
5291
5292         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5293             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5294                 return 0;
5295
5296         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5297         if (set_rss) {
5298                 bnxt_fill_hw_rss_tbl(bp, vnic);
5299                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5300                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5301                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5302                 req.hash_key_tbl_addr =
5303                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5304         }
5305         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5306         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5307 }
5308
5309 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5310 {
5311         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5312         struct hwrm_vnic_rss_cfg_input req = {0};
5313         dma_addr_t ring_tbl_map;
5314         u32 i, nr_ctxs;
5315
5316         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5317         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5318         if (!set_rss) {
5319                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5320                 return 0;
5321         }
5322         bnxt_fill_hw_rss_tbl(bp, vnic);
5323         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5324         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5325         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5326         ring_tbl_map = vnic->rss_table_dma_addr;
5327         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
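        /* Program the indirection table in chunks, one RSS context at a
         * time, advancing the DMA address by one table size per context.
         */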
5328         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5329                 int rc;
5330
5331                 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5332                 req.ring_table_pair_index = i;
5333                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5334                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5335                 if (rc)
5336                         return rc;
5337         }
5338         return 0;
5339 }
5340
5341 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5342 {
5343         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5344         struct hwrm_vnic_plcmodes_cfg_input req = {0};
5345
5346         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5347         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5348                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5349                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5350         req.enables =
5351                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5352                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5353         /* thresholds not implemented in firmware yet */
5354         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5355         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5356         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5357         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5358 }
5359
5360 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5361                                         u16 ctx_idx)
5362 {
5363         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5364
5365         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5366         req.rss_cos_lb_ctx_id =
5367                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5368
5369         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5370         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5371 }
5372
5373 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5374 {
5375         int i, j;
5376
5377         for (i = 0; i < bp->nr_vnics; i++) {
5378                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5379
5380                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5381                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5382                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5383                 }
5384         }
5385         bp->rsscos_nr_ctxs = 0;
5386 }
5387
5388 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5389 {
5390         int rc;
5391         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5392         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5393                                                 bp->hwrm_cmd_resp_addr;
5394
5395         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5396                                -1);
5397
5398         mutex_lock(&bp->hwrm_cmd_lock);
5399         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5400         if (!rc)
5401                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5402                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5403         mutex_unlock(&bp->hwrm_cmd_lock);
5404
5405         return rc;
5406 }
5407
5408 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5409 {
5410         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5411                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5412         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5413 }
5414
5415 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5416 {
5417         unsigned int ring = 0, grp_idx;
5418         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5419         struct hwrm_vnic_cfg_input req = {0};
5420         u16 def_vlan = 0;
5421
5422         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5423
5424         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5425                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5426
5427                 req.default_rx_ring_id =
5428                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5429                 req.default_cmpl_ring_id =
5430                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5431                 req.enables =
5432                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5433                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5434                 goto vnic_mru;
5435         }
5436         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5437         /* Only RSS is supported for now; TBD: COS & LB */
5438         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5439                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5440                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5441                                            VNIC_CFG_REQ_ENABLES_MRU);
5442         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5443                 req.rss_rule =
5444                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5445                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5446                                            VNIC_CFG_REQ_ENABLES_MRU);
5447                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5448         } else {
5449                 req.rss_rule = cpu_to_le16(0xffff);
5450         }
5451
5452         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5453             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5454                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5455                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5456         } else {
5457                 req.cos_rule = cpu_to_le16(0xffff);
5458         }
5459
5460         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5461                 ring = 0;
5462         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5463                 ring = vnic_id - 1;
5464         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5465                 ring = bp->rx_nr_rings - 1;
5466
5467         grp_idx = bp->rx_ring[ring].bnapi->index;
5468         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5469         req.lb_rule = cpu_to_le16(0xffff);
5470 vnic_mru:
5471         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5472
5473         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5474 #ifdef CONFIG_BNXT_SRIOV
5475         if (BNXT_VF(bp))
5476                 def_vlan = bp->vf.vlan;
5477 #endif
5478         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5479                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5480         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5481                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5482
5483         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5484 }
5485
5486 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5487 {
5488         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5489                 struct hwrm_vnic_free_input req = {0};
5490
5491                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5492                 req.vnic_id =
5493                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5494
5495                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5496                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5497         }
5498 }
5499
5500 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5501 {
5502         u16 i;
5503
5504         for (i = 0; i < bp->nr_vnics; i++)
5505                 bnxt_hwrm_vnic_free_one(bp, i);
5506 }
5507
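     /* Allocate a VNIC and map nr_rings RX ring groups to it starting at
      * start_rx_ring_idx (P5 chips have no ring groups and skip the map).
      * VNIC 0 is flagged as the default VNIC.  A minimal sketch of the usual
      * default-VNIC call, assuming ring groups are already allocated:
      *
      *	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
      */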
5508 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5509                                 unsigned int start_rx_ring_idx,
5510                                 unsigned int nr_rings)
5511 {
5512         int rc = 0;
5513         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5514         struct hwrm_vnic_alloc_input req = {0};
5515         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5516         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5517
5518         if (bp->flags & BNXT_FLAG_CHIP_P5)
5519                 goto vnic_no_ring_grps;
5520
5521         /* map ring groups to this vnic */
5522         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5523                 grp_idx = bp->rx_ring[i].bnapi->index;
5524                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5525                         netdev_err(bp->dev, "Not enough ring groups avail:%u req:%u\n",
5526                                    j, nr_rings);
5527                         break;
5528                 }
5529                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5530         }
5531
5532 vnic_no_ring_grps:
5533         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5534                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5535         if (vnic_id == 0)
5536                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5537
5538         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5539
5540         mutex_lock(&bp->hwrm_cmd_lock);
5541         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5542         if (!rc)
5543                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5544         mutex_unlock(&bp->hwrm_cmd_lock);
5545         return rc;
5546 }
5547
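     /* One-time VNIC capability query.  Firmware older than HWRM spec
      * 0x10600 lacks HWRM_VNIC_QCAPS, so the defaults set at the top of
      * the function are simply kept in that case.
      */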
5548 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5549 {
5550         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5551         struct hwrm_vnic_qcaps_input req = {0};
5552         int rc;
5553
5554         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5555         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5556         if (bp->hwrm_spec_code < 0x10600)
5557                 return 0;
5558
5559         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5560         mutex_lock(&bp->hwrm_cmd_lock);
5561         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5562         if (!rc) {
5563                 u32 flags = le32_to_cpu(resp->flags);
5564
5565                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5566                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5567                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5568                 if (flags &
5569                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5570                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5571
5572                 /* Older P5 fw before EXT_HW_STATS support did not set
5573                  * VLAN_STRIP_CAP properly.
5574                  */
5575                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5576                     (BNXT_CHIP_P5_THOR(bp) &&
5577                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5578                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5579                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5580                 if (bp->max_tpa_v2) {
5581                         if (BNXT_CHIP_P5_THOR(bp))
5582                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5583                         else
5584                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5585                 }
5586         }
5587         mutex_unlock(&bp->hwrm_cmd_lock);
5588         return rc;
5589 }
5590
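     /* Pre-P5 only: bind one completion ring, RX ring, agg ring and stats
      * context into a ring group per RX ring.  The fw_grp_id saved here is
      * what the VNIC configuration above references via dflt_ring_grp.
      */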
5591 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5592 {
5593         u16 i;
5594         int rc = 0;
5595
5596         if (bp->flags & BNXT_FLAG_CHIP_P5)
5597                 return 0;
5598
5599         mutex_lock(&bp->hwrm_cmd_lock);
5600         for (i = 0; i < bp->rx_nr_rings; i++) {
5601                 struct hwrm_ring_grp_alloc_input req = {0};
5602                 struct hwrm_ring_grp_alloc_output *resp =
5603                                         bp->hwrm_cmd_resp_addr;
5604                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5605
5606                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5607
5608                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5609                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5610                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5611                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5612
5613                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5614                                         HWRM_CMD_TIMEOUT);
5615                 if (rc)
5616                         break;
5617
5618                 bp->grp_info[grp_idx].fw_grp_id =
5619                         le32_to_cpu(resp->ring_group_id);
5620         }
5621         mutex_unlock(&bp->hwrm_cmd_lock);
5622         return rc;
5623 }
5624
5625 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5626 {
5627         u16 i;
5628         struct hwrm_ring_grp_free_input req = {0};
5629
5630         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5631                 return;
5632
5633         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5634
5635         mutex_lock(&bp->hwrm_cmd_lock);
5636         for (i = 0; i < bp->cp_nr_rings; i++) {
5637                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5638                         continue;
5639                 req.ring_group_id =
5640                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5641
5642                 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5643                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5644         }
5645         mutex_unlock(&bp->hwrm_cmd_lock);
5646 }
5647
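     /* Common HWRM_RING_ALLOC sender for every ring type.  Multi-page ring
      * memory passes a page table; single-page rings pass the page directly.
      * Illustrative sketch of how the TX loop in bnxt_hwrm_ring_alloc()
      * below drives it:
      *
      *	rc = hwrm_ring_alloc_send_msg(bp, &txr->tx_ring_struct,
      *				      HWRM_RING_ALLOC_TX, i);
      *	if (!rc)
      *		bnxt_set_db(bp, &txr->tx_db, HWRM_RING_ALLOC_TX, i,
      *			    txr->tx_ring_struct.fw_ring_id);
      */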
5648 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5649                                     struct bnxt_ring_struct *ring,
5650                                     u32 ring_type, u32 map_index)
5651 {
5652         int rc = 0, err = 0;
5653         struct hwrm_ring_alloc_input req = {0};
5654         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5655         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5656         struct bnxt_ring_grp_info *grp_info;
5657         u16 ring_id;
5658
5659         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5660
5661         req.enables = 0;
5662         if (rmem->nr_pages > 1) {
5663                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5664                 /* Page size is in log2 units */
5665                 req.page_size = BNXT_PAGE_SHIFT;
5666                 req.page_tbl_depth = 1;
5667         } else {
5668                 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5669         }
5670         req.fbo = 0;
5671         /* Association of ring index with doorbell index and MSIX number */
5672         req.logical_id = cpu_to_le16(map_index);
5673
5674         switch (ring_type) {
5675         case HWRM_RING_ALLOC_TX: {
5676                 struct bnxt_tx_ring_info *txr;
5677
5678                 txr = container_of(ring, struct bnxt_tx_ring_info,
5679                                    tx_ring_struct);
5680                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5681                 /* Association of transmit ring with completion ring */
5682                 grp_info = &bp->grp_info[ring->grp_idx];
5683                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5684                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5685                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5686                 req.queue_id = cpu_to_le16(ring->queue_id);
5687                 break;
5688         }
5689         case HWRM_RING_ALLOC_RX:
5690                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5691                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5692                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5693                         u16 flags = 0;
5694
5695                         /* Association of rx ring with stats context */
5696                         grp_info = &bp->grp_info[ring->grp_idx];
5697                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5698                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5699                         req.enables |= cpu_to_le32(
5700                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5701                         if (NET_IP_ALIGN == 2)
5702                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5703                         req.flags = cpu_to_le16(flags);
5704                 }
5705                 break;
5706         case HWRM_RING_ALLOC_AGG:
5707                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5708                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5709                         /* Association of agg ring with rx ring */
5710                         grp_info = &bp->grp_info[ring->grp_idx];
5711                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5712                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5713                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5714                         req.enables |= cpu_to_le32(
5715                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5716                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5717                 } else {
5718                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5719                 }
5720                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5721                 break;
5722         case HWRM_RING_ALLOC_CMPL:
5723                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5724                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5725                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5726                         /* Association of cp ring with nq */
5727                         grp_info = &bp->grp_info[map_index];
5728                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5729                         req.cq_handle = cpu_to_le64(ring->handle);
5730                         req.enables |= cpu_to_le32(
5731                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5732                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5733                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5734                 }
5735                 break;
5736         case HWRM_RING_ALLOC_NQ:
5737                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5738                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5739                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5740                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5741                 break;
5742         default:
5743                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5744                            ring_type);
5745                 return -1;
5746         }
5747
5748         mutex_lock(&bp->hwrm_cmd_lock);
5749         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5750         err = le16_to_cpu(resp->error_code);
5751         ring_id = le16_to_cpu(resp->ring_id);
5752         mutex_unlock(&bp->hwrm_cmd_lock);
5753
5754         if (rc || err) {
5755                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5756                            ring_type, rc, err);
5757                 return -EIO;
5758         }
5759         ring->fw_ring_id = ring_id;
5760         return rc;
5761 }
5762
5763 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5764 {
5765         int rc;
5766
5767         if (BNXT_PF(bp)) {
5768                 struct hwrm_func_cfg_input req = {0};
5769
5770                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5771                 req.fid = cpu_to_le16(0xffff);
5772                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5773                 req.async_event_cr = cpu_to_le16(idx);
5774                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5775         } else {
5776                 struct hwrm_func_vf_cfg_input req = {0};
5777
5778                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5779                 req.enables =
5780                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5781                 req.async_event_cr = cpu_to_le16(idx);
5782                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5783         }
5784         return rc;
5785 }
5786
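     /* Record the doorbell address and key for a freshly allocated ring.
      * P5 chips share one doorbell page per PF/VF and encode the firmware
      * ring id (xid) in a 64-bit key; legacy chips use a 32-bit key at a
      * fixed 0x80 stride per map index.
      */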
5787 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5788                         u32 map_idx, u32 xid)
5789 {
5790         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5791                 if (BNXT_PF(bp))
5792                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5793                 else
5794                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5795                 switch (ring_type) {
5796                 case HWRM_RING_ALLOC_TX:
5797                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5798                         break;
5799                 case HWRM_RING_ALLOC_RX:
5800                 case HWRM_RING_ALLOC_AGG:
5801                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5802                         break;
5803                 case HWRM_RING_ALLOC_CMPL:
5804                         db->db_key64 = DBR_PATH_L2;
5805                         break;
5806                 case HWRM_RING_ALLOC_NQ:
5807                         db->db_key64 = DBR_PATH_L2;
5808                         break;
5809                 }
5810                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5811         } else {
5812                 db->doorbell = bp->bar1 + map_idx * 0x80;
5813                 switch (ring_type) {
5814                 case HWRM_RING_ALLOC_TX:
5815                         db->db_key32 = DB_KEY_TX;
5816                         break;
5817                 case HWRM_RING_ALLOC_RX:
5818                 case HWRM_RING_ALLOC_AGG:
5819                         db->db_key32 = DB_KEY_RX;
5820                         break;
5821                 case HWRM_RING_ALLOC_CMPL:
5822                         db->db_key32 = DB_KEY_CP;
5823                         break;
5824                 }
5825         }
5826 }
5827
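     /* Allocate all rings in dependency order: NQs (or completion rings on
      * legacy chips) first, then TX, RX and agg rings, with per-type
      * completion rings interleaved on P5.  The vector is kept disabled
      * across NQ allocation and the initial bnxt_db_nq() write so an early
      * interrupt cannot race with doorbell setup.
      */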
5828 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5829 {
5830         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5831         int i, rc = 0;
5832         u32 type;
5833
5834         if (bp->flags & BNXT_FLAG_CHIP_P5)
5835                 type = HWRM_RING_ALLOC_NQ;
5836         else
5837                 type = HWRM_RING_ALLOC_CMPL;
5838         for (i = 0; i < bp->cp_nr_rings; i++) {
5839                 struct bnxt_napi *bnapi = bp->bnapi[i];
5840                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5841                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5842                 u32 map_idx = ring->map_idx;
5843                 unsigned int vector;
5844
5845                 vector = bp->irq_tbl[map_idx].vector;
5846                 disable_irq_nosync(vector);
5847                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5848                 if (rc) {
5849                         enable_irq(vector);
5850                         goto err_out;
5851                 }
5852                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5853                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5854                 enable_irq(vector);
5855                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5856
5857                 if (!i) {
5858                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5859                         if (rc)
5860                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5861                 }
5862         }
5863
5864         type = HWRM_RING_ALLOC_TX;
5865         for (i = 0; i < bp->tx_nr_rings; i++) {
5866                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5867                 struct bnxt_ring_struct *ring;
5868                 u32 map_idx;
5869
5870                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5871                         struct bnxt_napi *bnapi = txr->bnapi;
5872                         struct bnxt_cp_ring_info *cpr, *cpr2;
5873                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5874
5875                         cpr = &bnapi->cp_ring;
5876                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5877                         ring = &cpr2->cp_ring_struct;
5878                         ring->handle = BNXT_TX_HDL;
5879                         map_idx = bnapi->index;
5880                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5881                         if (rc)
5882                                 goto err_out;
5883                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5884                                     ring->fw_ring_id);
5885                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5886                 }
5887                 ring = &txr->tx_ring_struct;
5888                 map_idx = i;
5889                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5890                 if (rc)
5891                         goto err_out;
5892                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5893         }
5894
5895         type = HWRM_RING_ALLOC_RX;
5896         for (i = 0; i < bp->rx_nr_rings; i++) {
5897                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5898                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5899                 struct bnxt_napi *bnapi = rxr->bnapi;
5900                 u32 map_idx = bnapi->index;
5901
5902                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5903                 if (rc)
5904                         goto err_out;
5905                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5906                 /* With agg rings, agg buffers are posted first; defer the RX doorbell */
5907                 if (!agg_rings)
5908                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5909                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5910                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5911                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5912                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5913                         struct bnxt_cp_ring_info *cpr2;
5914
5915                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5916                         ring = &cpr2->cp_ring_struct;
5917                         ring->handle = BNXT_RX_HDL;
5918                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5919                         if (rc)
5920                                 goto err_out;
5921                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5922                                     ring->fw_ring_id);
5923                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5924                 }
5925         }
5926
5927         if (agg_rings) {
5928                 type = HWRM_RING_ALLOC_AGG;
5929                 for (i = 0; i < bp->rx_nr_rings; i++) {
5930                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5931                         struct bnxt_ring_struct *ring =
5932                                                 &rxr->rx_agg_ring_struct;
5933                         u32 grp_idx = ring->grp_idx;
5934                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5935
5936                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5937                         if (rc)
5938                                 goto err_out;
5939
5940                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5941                                     ring->fw_ring_id);
5942                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5943                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5944                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5945                 }
5946         }
5947 err_out:
5948         return rc;
5949 }
5950
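     /* Common HWRM_RING_FREE sender.  A valid cmpl_ring_id lets firmware
      * flush pending completions for the dying ring; callers pass
      * INVALID_HW_RING_ID when not closing the path.  Illustrative use for
      * a TX ring on the close path (mirrors bnxt_hwrm_ring_free() below):
      *
      *	hwrm_ring_free_send_msg(bp, &txr->tx_ring_struct,
      *				RING_FREE_REQ_RING_TYPE_TX,
      *				bnxt_cp_ring_for_tx(bp, txr));
      */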
5951 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5952                                    struct bnxt_ring_struct *ring,
5953                                    u32 ring_type, int cmpl_ring_id)
5954 {
5955         int rc;
5956         struct hwrm_ring_free_input req = {0};
5957         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5958         u16 error_code;
5959
5960         if (BNXT_NO_FW_ACCESS(bp))
5961                 return 0;
5962
5963         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5964         req.ring_type = ring_type;
5965         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5966
5967         mutex_lock(&bp->hwrm_cmd_lock);
5968         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5969         error_code = le16_to_cpu(resp->error_code);
5970         mutex_unlock(&bp->hwrm_cmd_lock);
5971
5972         if (rc || error_code) {
5973                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5974                            ring_type, rc, error_code);
5975                 return -EIO;
5976         }
5977         return 0;
5978 }
5979
5980 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5981 {
5982         u32 type;
5983         int i;
5984
5985         if (!bp->bnapi)
5986                 return;
5987
5988         for (i = 0; i < bp->tx_nr_rings; i++) {
5989                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5990                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5991
5992                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5993                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5994
5995                         hwrm_ring_free_send_msg(bp, ring,
5996                                                 RING_FREE_REQ_RING_TYPE_TX,
5997                                                 close_path ? cmpl_ring_id :
5998                                                 INVALID_HW_RING_ID);
5999                         ring->fw_ring_id = INVALID_HW_RING_ID;
6000                 }
6001         }
6002
6003         for (i = 0; i < bp->rx_nr_rings; i++) {
6004                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6005                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6006                 u32 grp_idx = rxr->bnapi->index;
6007
6008                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6009                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6010
6011                         hwrm_ring_free_send_msg(bp, ring,
6012                                                 RING_FREE_REQ_RING_TYPE_RX,
6013                                                 close_path ? cmpl_ring_id :
6014                                                 INVALID_HW_RING_ID);
6015                         ring->fw_ring_id = INVALID_HW_RING_ID;
6016                         bp->grp_info[grp_idx].rx_fw_ring_id =
6017                                 INVALID_HW_RING_ID;
6018                 }
6019         }
6020
6021         if (bp->flags & BNXT_FLAG_CHIP_P5)
6022                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6023         else
6024                 type = RING_FREE_REQ_RING_TYPE_RX;
6025         for (i = 0; i < bp->rx_nr_rings; i++) {
6026                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6027                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6028                 u32 grp_idx = rxr->bnapi->index;
6029
6030                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6031                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6032
6033                         hwrm_ring_free_send_msg(bp, ring, type,
6034                                                 close_path ? cmpl_ring_id :
6035                                                 INVALID_HW_RING_ID);
6036                         ring->fw_ring_id = INVALID_HW_RING_ID;
6037                         bp->grp_info[grp_idx].agg_fw_ring_id =
6038                                 INVALID_HW_RING_ID;
6039                 }
6040         }
6041
6042         /* The completion rings are about to be freed, and once they
6043          * are, the IRQ doorbells will no longer work.  Disable the
6044          * IRQs now.
6045          */
6046         bnxt_disable_int_sync(bp);
6047
6048         if (bp->flags & BNXT_FLAG_CHIP_P5)
6049                 type = RING_FREE_REQ_RING_TYPE_NQ;
6050         else
6051                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6052         for (i = 0; i < bp->cp_nr_rings; i++) {
6053                 struct bnxt_napi *bnapi = bp->bnapi[i];
6054                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6055                 struct bnxt_ring_struct *ring;
6056                 int j;
6057
6058                 for (j = 0; j < 2; j++) {
6059                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6060
6061                         if (cpr2) {
6062                                 ring = &cpr2->cp_ring_struct;
6063                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6064                                         continue;
6065                                 hwrm_ring_free_send_msg(bp, ring,
6066                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
6067                                         INVALID_HW_RING_ID);
6068                                 ring->fw_ring_id = INVALID_HW_RING_ID;
6069                         }
6070                 }
6071                 ring = &cpr->cp_ring_struct;
6072                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6073                         hwrm_ring_free_send_msg(bp, ring, type,
6074                                                 INVALID_HW_RING_ID);
6075                         ring->fw_ring_id = INVALID_HW_RING_ID;
6076                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6077                 }
6078         }
6079 }
6080
6081 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6082                            bool shared);
6083
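     /* Refresh the cached reserved-resource counts from HWRM_FUNC_QCFG.  On
      * P5 chips the completion-ring budget can be smaller than RX + TX, in
      * which case the RX/TX reservations are trimmed to fit (RX is halved
      * before and doubled after the trim when agg rings are enabled).
      */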
6084 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6085 {
6086         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6087         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6088         struct hwrm_func_qcfg_input req = {0};
6089         int rc;
6090
6091         if (bp->hwrm_spec_code < 0x10601)
6092                 return 0;
6093
6094         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6095         req.fid = cpu_to_le16(0xffff);
6096         mutex_lock(&bp->hwrm_cmd_lock);
6097         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6098         if (rc) {
6099                 mutex_unlock(&bp->hwrm_cmd_lock);
6100                 return rc;
6101         }
6102
6103         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6104         if (BNXT_NEW_RM(bp)) {
6105                 u16 cp, stats;
6106
6107                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6108                 hw_resc->resv_hw_ring_grps =
6109                         le32_to_cpu(resp->alloc_hw_ring_grps);
6110                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6111                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6112                 stats = le16_to_cpu(resp->alloc_stat_ctx);
6113                 hw_resc->resv_irqs = cp;
6114                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6115                         int rx = hw_resc->resv_rx_rings;
6116                         int tx = hw_resc->resv_tx_rings;
6117
6118                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6119                                 rx >>= 1;
6120                         if (cp < (rx + tx)) {
6121                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6122                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6123                                         rx <<= 1;
6124                                 hw_resc->resv_rx_rings = rx;
6125                                 hw_resc->resv_tx_rings = tx;
6126                         }
6127                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6128                         hw_resc->resv_hw_ring_grps = rx;
6129                 }
6130                 hw_resc->resv_cp_rings = cp;
6131                 hw_resc->resv_stat_ctxs = stats;
6132         }
6133         mutex_unlock(&bp->hwrm_cmd_lock);
6134         return 0;
6135 }
6136
6137 /* Caller must hold bp->hwrm_cmd_lock */
6138 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6139 {
6140         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6141         struct hwrm_func_qcfg_input req = {0};
6142         int rc;
6143
6144         if (bp->hwrm_spec_code < 0x10601)
6145                 return 0;
6146
6147         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6148         req.fid = cpu_to_le16(fid);
6149         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6150         if (!rc)
6151                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6152
6153         return rc;
6154 }
6155
6156 static bool bnxt_rfs_supported(struct bnxt *bp);
6157
6158 static void
6159 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6160                              int tx_rings, int rx_rings, int ring_grps,
6161                              int cp_rings, int stats, int vnics)
6162 {
6163         u32 enables = 0;
6164
6165         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6166         req->fid = cpu_to_le16(0xffff);
6167         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6168         req->num_tx_rings = cpu_to_le16(tx_rings);
6169         if (BNXT_NEW_RM(bp)) {
6170                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6171                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6172                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6173                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6174                         enables |= tx_rings + ring_grps ?
6175                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6176                         enables |= rx_rings ?
6177                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6178                 } else {
6179                         enables |= cp_rings ?
6180                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6181                         enables |= ring_grps ?
6182                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6183                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6184                 }
6185                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6186
6187                 req->num_rx_rings = cpu_to_le16(rx_rings);
6188                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6189                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6190                         req->num_msix = cpu_to_le16(cp_rings);
6191                         req->num_rsscos_ctxs =
6192                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6193                 } else {
6194                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6195                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6196                         req->num_rsscos_ctxs = cpu_to_le16(1);
6197                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6198                             bnxt_rfs_supported(bp))
6199                                 req->num_rsscos_ctxs =
6200                                         cpu_to_le16(ring_grps + 1);
6201                 }
6202                 req->num_stat_ctxs = cpu_to_le16(stats);
6203                 req->num_vnics = cpu_to_le16(vnics);
6204         }
6205         req->enables = cpu_to_le32(enables);
6206 }
6207
6208 static void
6209 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6210                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
6211                              int rx_rings, int ring_grps, int cp_rings,
6212                              int stats, int vnics)
6213 {
6214         u32 enables = 0;
6215
6216         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6217         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6218         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6219                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6220         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6221         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6222                 enables |= tx_rings + ring_grps ?
6223                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6224         } else {
6225                 enables |= cp_rings ?
6226                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6227                 enables |= ring_grps ?
6228                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6229         }
6230         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6231         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6232
6233         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6234         req->num_tx_rings = cpu_to_le16(tx_rings);
6235         req->num_rx_rings = cpu_to_le16(rx_rings);
6236         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6237                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6238                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6239         } else {
6240                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6241                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6242                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6243         }
6244         req->num_stat_ctxs = cpu_to_le16(stats);
6245         req->num_vnics = cpu_to_le16(vnics);
6246
6247         req->enables = cpu_to_le32(enables);
6248 }
6249
6250 static int
6251 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6252                            int ring_grps, int cp_rings, int stats, int vnics)
6253 {
6254         struct hwrm_func_cfg_input req = {0};
6255         int rc;
6256
6257         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6258                                      cp_rings, stats, vnics);
6259         if (!req.enables)
6260                 return 0;
6261
6262         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6263         if (rc)
6264                 return rc;
6265
6266         if (bp->hwrm_spec_code < 0x10601)
6267                 bp->hw_resc.resv_tx_rings = tx_rings;
6268
6269         return bnxt_hwrm_get_rings(bp);
6270 }
6271
6272 static int
6273 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6274                            int ring_grps, int cp_rings, int stats, int vnics)
6275 {
6276         struct hwrm_func_vf_cfg_input req = {0};
6277         int rc;
6278
6279         if (!BNXT_NEW_RM(bp)) {
6280                 bp->hw_resc.resv_tx_rings = tx_rings;
6281                 return 0;
6282         }
6283
6284         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6285                                      cp_rings, stats, vnics);
6286         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6287         if (rc)
6288                 return rc;
6289
6290         return bnxt_hwrm_get_rings(bp);
6291 }
6292
6293 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6294                                    int cp, int stat, int vnic)
6295 {
6296         if (BNXT_PF(bp))
6297                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6298                                                   vnic);
6299         else
6300                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6301                                                   vnic);
6302 }
6303
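     /* NQ accounting: ULP (RoCE) vectors stack on top of the L2 ones, so
      * the count must also cover the highest ULP vector.  A hypothetical
      * worked example: with cp_nr_rings = 8 and 4 ULP vectors at base 8,
      * cp becomes 8 + 4 = 12, which already covers ulp_base + ulp_msix.
      */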
6304 int bnxt_nq_rings_in_use(struct bnxt *bp)
6305 {
6306         int cp = bp->cp_nr_rings;
6307         int ulp_msix, ulp_base;
6308
6309         ulp_msix = bnxt_get_ulp_msix_num(bp);
6310         if (ulp_msix) {
6311                 ulp_base = bnxt_get_ulp_msix_base(bp);
6312                 cp += ulp_msix;
6313                 if ((ulp_base + ulp_msix) > cp)
6314                         cp = ulp_base + ulp_msix;
6315         }
6316         return cp;
6317 }
6318
6319 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6320 {
6321         int cp;
6322
6323         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6324                 return bnxt_nq_rings_in_use(bp);
6325
6326         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6327         return cp;
6328 }
6329
6330 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6331 {
6332         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6333         int cp = bp->cp_nr_rings;
6334
6335         if (!ulp_stat)
6336                 return cp;
6337
6338         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6339                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6340
6341         return cp + ulp_stat;
6342 }
6343
6344 /* Check if a default RSS map needs to be set up.  This function is only
6345  * used on older firmware that does not require reserving RX rings.
6346  */
6347 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6348 {
6349         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6350
6351         /* The current RSS map was sized for resv_rx_rings RX rings */
6352         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6353                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6354                 if (!netif_is_rxfh_configured(bp->dev))
6355                         bnxt_set_dflt_rss_indir_tbl(bp);
6356         }
6357 }
6358
6359 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6360 {
6361         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6362         int cp = bnxt_cp_rings_in_use(bp);
6363         int nq = bnxt_nq_rings_in_use(bp);
6364         int rx = bp->rx_nr_rings, stat;
6365         int vnic = 1, grp = rx;
6366
6367         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6368             bp->hwrm_spec_code >= 0x10601)
6369                 return true;
6370
6371         /* Old firmware does not need RX ring reservations but we still
6372          * need to setup a default RSS map when needed.  With new firmware
6373          * we go through RX ring reservations first and then set up the
6374          * RSS map for the successfully reserved RX rings when needed.
6375          */
6376         if (!BNXT_NEW_RM(bp)) {
6377                 bnxt_check_rss_tbl_no_rmgr(bp);
6378                 return false;
6379         }
6380         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6381                 vnic = rx + 1;
6382         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6383                 rx <<= 1;
6384         stat = bnxt_get_func_stat_ctxs(bp);
6385         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6386             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6387             (hw_resc->resv_hw_ring_grps != grp &&
6388              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6389                 return true;
6390         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6391             hw_resc->resv_irqs != nq)
6392                 return true;
6393         return false;
6394 }
6395
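     /* Reserve rings with firmware, then shrink the driver's view to what
      * was actually granted: agg mode is dropped if fewer than 2 RX rings
      * remain, rings are trimmed to the completion and stats budgets, and
      * the RSS table is rebuilt only if the user has not configured one.
      */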
6396 static int __bnxt_reserve_rings(struct bnxt *bp)
6397 {
6398         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6399         int cp = bnxt_nq_rings_in_use(bp);
6400         int tx = bp->tx_nr_rings;
6401         int rx = bp->rx_nr_rings;
6402         int grp, rx_rings, rc;
6403         int vnic = 1, stat;
6404         bool sh = false;
6405
6406         if (!bnxt_need_reserve_rings(bp))
6407                 return 0;
6408
6409         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6410                 sh = true;
6411         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6412                 vnic = rx + 1;
6413         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6414                 rx <<= 1;
6415         grp = bp->rx_nr_rings;
6416         stat = bnxt_get_func_stat_ctxs(bp);
6417
6418         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6419         if (rc)
6420                 return rc;
6421
6422         tx = hw_resc->resv_tx_rings;
6423         if (BNXT_NEW_RM(bp)) {
6424                 rx = hw_resc->resv_rx_rings;
6425                 cp = hw_resc->resv_irqs;
6426                 grp = hw_resc->resv_hw_ring_grps;
6427                 vnic = hw_resc->resv_vnics;
6428                 stat = hw_resc->resv_stat_ctxs;
6429         }
6430
6431         rx_rings = rx;
6432         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6433                 if (rx >= 2) {
6434                         rx_rings = rx >> 1;
6435                 } else {
6436                         if (netif_running(bp->dev))
6437                                 return -ENOMEM;
6438
6439                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6440                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6441                         bp->dev->hw_features &= ~NETIF_F_LRO;
6442                         bp->dev->features &= ~NETIF_F_LRO;
6443                         bnxt_set_ring_params(bp);
6444                 }
6445         }
6446         rx_rings = min_t(int, rx_rings, grp);
6447         cp = min_t(int, cp, bp->cp_nr_rings);
6448         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6449                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6450         cp = min_t(int, cp, stat);
6451         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6452         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6453                 rx = rx_rings << 1;
6454         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6455         bp->tx_nr_rings = tx;
6456
6457         /* If we cannot reserve all the RX rings, reset the RSS map only
6458          * if absolutely necessary
6459          */
6460         if (rx_rings != bp->rx_nr_rings) {
6461                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6462                             rx_rings, bp->rx_nr_rings);
6463                 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6464                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6465                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6466                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6467                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6468                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6469                 }
6470         }
6471         bp->rx_nr_rings = rx_rings;
6472         bp->cp_nr_rings = cp;
6473
6474         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6475                 return -ENOMEM;
6476
6477         if (!netif_is_rxfh_configured(bp->dev))
6478                 bnxt_set_dflt_rss_indir_tbl(bp);
6479
6480         return rc;
6481 }
6482
6483 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6484                                     int ring_grps, int cp_rings, int stats,
6485                                     int vnics)
6486 {
6487         struct hwrm_func_vf_cfg_input req = {0};
6488         u32 flags;
6489
6490         if (!BNXT_NEW_RM(bp))
6491                 return 0;
6492
6493         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6494                                      cp_rings, stats, vnics);
6495         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6496                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6497                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6498                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6499                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6500                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6501         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6502                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6503
6504         req.flags = cpu_to_le32(flags);
6505         return hwrm_send_message_silent(bp, &req, sizeof(req),
6506                                         HWRM_CMD_TIMEOUT);
6507 }
6508
6509 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6510                                     int ring_grps, int cp_rings, int stats,
6511                                     int vnics)
6512 {
6513         struct hwrm_func_cfg_input req = {0};
6514         u32 flags;
6515
6516         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6517                                      cp_rings, stats, vnics);
6518         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6519         if (BNXT_NEW_RM(bp)) {
6520                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6521                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6522                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6523                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6524                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6525                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6526                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6527                 else
6528                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6529         }
6530
6531         req.flags = cpu_to_le32(flags);
6532         return hwrm_send_message_silent(bp, &req, sizeof(req),
6533                                         HWRM_CMD_TIMEOUT);
6534 }
6535
6536 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6537                                  int ring_grps, int cp_rings, int stats,
6538                                  int vnics)
6539 {
6540         if (bp->hwrm_spec_code < 0x10801)
6541                 return 0;
6542
6543         if (BNXT_PF(bp))
6544                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6545                                                 ring_grps, cp_rings, stats,
6546                                                 vnics);
6547
6548         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6549                                         cp_rings, stats, vnics);
6550 }
6551
6552 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6553 {
6554         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6555         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6556         struct hwrm_ring_aggint_qcaps_input req = {0};
6557         int rc;
6558
6559         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6560         coal_cap->num_cmpl_dma_aggr_max = 63;
6561         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6562         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6563         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6564         coal_cap->int_lat_tmr_min_max = 65535;
6565         coal_cap->int_lat_tmr_max_max = 65535;
6566         coal_cap->num_cmpl_aggr_int_max = 65535;
6567         coal_cap->timer_units = 80;
6568
6569         if (bp->hwrm_spec_code < 0x10902)
6570                 return;
6571
6572         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6573         mutex_lock(&bp->hwrm_cmd_lock);
6574         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6575         if (!rc) {
6576                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6577                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6578                 coal_cap->num_cmpl_dma_aggr_max =
6579                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6580                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6581                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6582                 coal_cap->cmpl_aggr_dma_tmr_max =
6583                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6584                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6585                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6586                 coal_cap->int_lat_tmr_min_max =
6587                         le16_to_cpu(resp->int_lat_tmr_min_max);
6588                 coal_cap->int_lat_tmr_max_max =
6589                         le16_to_cpu(resp->int_lat_tmr_max_max);
6590                 coal_cap->num_cmpl_aggr_int_max =
6591                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6592                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6593         }
6594         mutex_unlock(&bp->hwrm_cmd_lock);
6595 }
6596
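     /* Convert microseconds to device coalescing timer ticks; timer_units
      * is ns per tick.  E.g. with the default 80 ns units, a 12 usec
      * setting becomes 12 * 1000 / 80 = 150 ticks.
      */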
6597 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6598 {
6599         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6600
6601         return usec * 1000 / coal_cap->timer_units;
6602 }
6603
6604 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6605         struct bnxt_coal *hw_coal,
6606         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6607 {
6608         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6609         u32 cmpl_params = coal_cap->cmpl_params;
6610         u16 val, tmr, max, flags = 0;
6611
6612         max = hw_coal->bufs_per_record * 128;
6613         if (hw_coal->budget)
6614                 max = hw_coal->bufs_per_record * hw_coal->budget;
6615         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6616
6617         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6618         req->num_cmpl_aggr_int = cpu_to_le16(val);
6619
6620         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6621         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6622
6623         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6624                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6625         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6626
6627         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6628         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6629         req->int_lat_tmr_max = cpu_to_le16(tmr);
6630
6631         /* min timer set to 1/2 of interrupt timer */
6632         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6633                 val = tmr / 2;
6634                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6635                 req->int_lat_tmr_min = cpu_to_le16(val);
6636                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6637         }
6638
6639         /* buf timer set to 1/4 of interrupt timer */
6640         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6641         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6642
6643         if (cmpl_params &
6644             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6645                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6646                 val = clamp_t(u16, tmr, 1,
6647                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6648                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6649                 req->enables |=
6650                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6651         }
6652
6653         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6654                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6655         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6656             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6657                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6658         req->flags = cpu_to_le16(flags);
6659         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6660 }
6661
6662 /* Caller holds bp->hwrm_cmd_lock */
6663 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6664                                    struct bnxt_coal *hw_coal)
6665 {
6666         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6667         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6668         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6669         u32 nq_params = coal_cap->nq_params;
6670         u16 tmr;
6671
6672         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6673                 return 0;
6674
6675         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6676                                -1, -1);
6677         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6678         req.flags =
6679                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6680
6681         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6682         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6683         req.int_lat_tmr_min = cpu_to_le16(tmr);
6684         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6685         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6686 }
6687
6688 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6689 {
6690         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6691         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6692         struct bnxt_coal coal;
6693
6694         /* Tick values are in microseconds.
6695          * One coal_buf times bufs_per_record equals one completion record.
6696          */
6697         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6698
6699         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6700         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6701
6702         if (!bnapi->rx_ring)
6703                 return -ENODEV;
6704
6705         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6706                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6707
6708         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6709
6710         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6711
6712         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6713                                  HWRM_CMD_TIMEOUT);
6714 }
6715
6716 int bnxt_hwrm_set_coal(struct bnxt *bp)
6717 {
6718         int i, rc = 0;
6719         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6720                                                            req_tx = {0}, *req;
6721
6722         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6723                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6724         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6725                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6726
6727         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6728         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6729
6730         mutex_lock(&bp->hwrm_cmd_lock);
6731         for (i = 0; i < bp->cp_nr_rings; i++) {
6732                 struct bnxt_napi *bnapi = bp->bnapi[i];
6733                 struct bnxt_coal *hw_coal;
6734                 u16 ring_id;
6735
6736                 req = &req_rx;
6737                 if (!bnapi->rx_ring) {
6738                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6739                         req = &req_tx;
6740                 } else {
6741                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6742                 }
6743                 req->ring_id = cpu_to_le16(ring_id);
6744
6745                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6746                                         HWRM_CMD_TIMEOUT);
6747                 if (rc)
6748                         break;
6749
6750                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6751                         continue;
6752
6753                 if (bnapi->rx_ring && bnapi->tx_ring) {
6754                         req = &req_tx;
6755                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6756                         req->ring_id = cpu_to_le16(ring_id);
6757                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6758                                                 HWRM_CMD_TIMEOUT);
6759                         if (rc)
6760                                 break;
6761                 }
6762                 if (bnapi->rx_ring)
6763                         hw_coal = &bp->rx_coal;
6764                 else
6765                         hw_coal = &bp->tx_coal;
6766                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6767         }
6768         mutex_unlock(&bp->hwrm_cmd_lock);
6769         return rc;
6770 }
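
/* Summary of the loop above: each bnapi is sent the RX template when it
 * has an RX ring, else the TX template.  On P5 chips completion rings
 * are separate per TX and RX, so a bnapi with both rings sends a second
 * message for its TX completion ring, and the parent NQ then gets its
 * minimum interrupt latency timer via __bnxt_hwrm_set_coal_nq().
 */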
6771
6772 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6773 {
6774         struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6775         struct hwrm_stat_ctx_free_input req = {0};
6776         int i;
6777
6778         if (!bp->bnapi)
6779                 return;
6780
6781         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6782                 return;
6783
6784         bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6785         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6786
6787         mutex_lock(&bp->hwrm_cmd_lock);
6788         for (i = 0; i < bp->cp_nr_rings; i++) {
6789                 struct bnxt_napi *bnapi = bp->bnapi[i];
6790                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6791
6792                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6793                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6794                         if (BNXT_FW_MAJ(bp) <= 20) {
6795                                 req0.stat_ctx_id = req.stat_ctx_id;
6796                                 _hwrm_send_message(bp, &req0, sizeof(req0),
6797                                                    HWRM_CMD_TIMEOUT);
6798                         }
6799                         _hwrm_send_message(bp, &req, sizeof(req),
6800                                            HWRM_CMD_TIMEOUT);
6801
6802                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6803                 }
6804         }
6805         mutex_unlock(&bp->hwrm_cmd_lock);
6806 }
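
/* The extra HWRM_STAT_CTX_CLR_STATS above is only issued to firmware
 * with major version <= 20, presumably because those releases do not
 * clear the counters when the context is freed (an assumption inferred
 * from the version check, not from the HWRM spec).
 */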
6807
6808 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6809 {
6810         int rc = 0, i;
6811         struct hwrm_stat_ctx_alloc_input req = {0};
6812         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6813
6814         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6815                 return 0;
6816
6817         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6818
6819         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6820         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6821
6822         mutex_lock(&bp->hwrm_cmd_lock);
6823         for (i = 0; i < bp->cp_nr_rings; i++) {
6824                 struct bnxt_napi *bnapi = bp->bnapi[i];
6825                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6826
6827                 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6828
6829                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6830                                         HWRM_CMD_TIMEOUT);
6831                 if (rc)
6832                         break;
6833
6834                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6835
6836                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6837         }
6838         mutex_unlock(&bp->hwrm_cmd_lock);
6839         return rc;
6840 }
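
/* One stats context is allocated per completion ring: stats_dma_addr
 * points at that ring's hw_stats block, and update_period_ms converts
 * bp->stats_coal_ticks (in usec) to milliseconds for the firmware's
 * periodic counter updates.
 */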
6841
6842 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6843 {
6844         struct hwrm_func_qcfg_input req = {0};
6845         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6846         u32 min_db_offset = 0;
6847         u16 flags;
6848         int rc;
6849
6850         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6851         req.fid = cpu_to_le16(0xffff);
6852         mutex_lock(&bp->hwrm_cmd_lock);
6853         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6854         if (rc)
6855                 goto func_qcfg_exit;
6856
6857 #ifdef CONFIG_BNXT_SRIOV
6858         if (BNXT_VF(bp)) {
6859                 struct bnxt_vf_info *vf = &bp->vf;
6860
6861                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6862         } else {
6863                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6864         }
6865 #endif
6866         flags = le16_to_cpu(resp->flags);
6867         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6868                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6869                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6870                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6871                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6872         }
6873         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6874                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6875         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6876                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6877
6878         switch (resp->port_partition_type) {
6879         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6880         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6881         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6882                 bp->port_partition_type = resp->port_partition_type;
6883                 break;
6884         }
6885         if (bp->hwrm_spec_code < 0x10707 ||
6886             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6887                 bp->br_mode = BRIDGE_MODE_VEB;
6888         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6889                 bp->br_mode = BRIDGE_MODE_VEPA;
6890         else
6891                 bp->br_mode = BRIDGE_MODE_UNDEF;
6892
6893         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6894         if (!bp->max_mtu)
6895                 bp->max_mtu = BNXT_MAX_MTU;
6896
6897         if (bp->db_size)
6898                 goto func_qcfg_exit;
6899
6900         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6901                 if (BNXT_PF(bp))
6902                         min_db_offset = DB_PF_OFFSET_P5;
6903                 else
6904                         min_db_offset = DB_VF_OFFSET_P5;
6905         }
6906         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6907                                  1024);
6908         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6909             bp->db_size <= min_db_offset)
6910                 bp->db_size = pci_resource_len(bp->pdev, 2);
6911
6912 func_qcfg_exit:
6913         mutex_unlock(&bp->hwrm_cmd_lock);
6914         return rc;
6915 }
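
/* Doorbell sizing above: the requirement is
 * PAGE_ALIGN(l2_doorbell_bar_size_kb * 1024), falling back to the full
 * length of PCI BAR 2 when firmware reports zero, reports more than the
 * BAR holds, or reports a size that does not exceed the chip's minimum
 * doorbell offset.
 */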
6916
6917 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6918                         struct hwrm_func_backing_store_qcaps_output *resp)
6919 {
6920         struct bnxt_mem_init *mem_init;
6921         u16 init_mask;
6922         u8 init_val;
6923         u8 *offset;
6924         int i;
6925
6926         init_val = resp->ctx_kind_initializer;
6927         init_mask = le16_to_cpu(resp->ctx_init_mask);
6928         offset = &resp->qp_init_offset;
6929         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6930         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6931                 mem_init->init_val = init_val;
6932                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6933                 if (!init_mask)
6934                         continue;
6935                 if (i == BNXT_CTX_MEM_INIT_STAT)
6936                         offset = &resp->stat_init_offset;
6937                 if (init_mask & (1 << i))
6938                         mem_init->offset = *offset * 4;
6939                 else
6940                         mem_init->init_val = 0;
6941         }
6942         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6943         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6944         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6945         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6946         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6947         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6948 }
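
/* The ctx_init_mask decode above: bit i set means context type i must be
 * initialized to ctx_kind_initializer, with the matching *_init_offset
 * field giving the location in 4-byte units (hence "*offset * 4").  For
 * example, qp_init_offset = 3 with the QP bit set yields a byte offset
 * of 12.  The offset pointer walks consecutive u8 response fields and is
 * re-seated at stat_init_offset for the STAT entry.
 */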
6949
6950 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6951 {
6952         struct hwrm_func_backing_store_qcaps_input req = {0};
6953         struct hwrm_func_backing_store_qcaps_output *resp =
6954                 bp->hwrm_cmd_resp_addr;
6955         int rc;
6956
6957         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6958                 return 0;
6959
6960         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6961         mutex_lock(&bp->hwrm_cmd_lock);
6962         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6963         if (!rc) {
6964                 struct bnxt_ctx_pg_info *ctx_pg;
6965                 struct bnxt_ctx_mem_info *ctx;
6966                 int i, tqm_rings;
6967
6968                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6969                 if (!ctx) {
6970                         rc = -ENOMEM;
6971                         goto ctx_err;
6972                 }
6973                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6974                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6975                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6976                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6977                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6978                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6979                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6980                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6981                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6982                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6983                 ctx->vnic_max_vnic_entries =
6984                         le16_to_cpu(resp->vnic_max_vnic_entries);
6985                 ctx->vnic_max_ring_table_entries =
6986                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6987                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6988                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6989                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6990                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6991                 ctx->tqm_min_entries_per_ring =
6992                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6993                 ctx->tqm_max_entries_per_ring =
6994                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6995                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6996                 if (!ctx->tqm_entries_multiple)
6997                         ctx->tqm_entries_multiple = 1;
6998                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6999                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
7000                 ctx->mrav_num_entries_units =
7001                         le16_to_cpu(resp->mrav_num_entries_units);
7002                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7003                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
7004
7005                 bnxt_init_ctx_initializer(ctx, resp);
7006
7007                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7008                 if (!ctx->tqm_fp_rings_count)
7009                         ctx->tqm_fp_rings_count = bp->max_q;
7010                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7011                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7012
7013                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
7014                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7015                 if (!ctx_pg) {
7016                         kfree(ctx);
7017                         rc = -ENOMEM;
7018                         goto ctx_err;
7019                 }
7020                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7021                         ctx->tqm_mem[i] = ctx_pg;
7022                 bp->ctx = ctx;
7023         } else {
7024                 rc = 0;
7025         }
7026 ctx_err:
7027         mutex_unlock(&bp->hwrm_cmd_lock);
7028         return rc;
7029 }
7030
7031 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7032                                   __le64 *pg_dir)
7033 {
7034         if (!rmem->nr_pages)
7035                 return;
7036
7037         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7038         if (rmem->depth >= 1) {
7039                 if (rmem->depth == 2)
7040                         *pg_attr |= 2;
7041                 else
7042                         *pg_attr |= 1;
7043                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7044         } else {
7045                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7046         }
7047 }
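
/* Page attribute encoding above: BNXT_SET_CTX_PAGE_ATTR() encodes the
 * page size, and the low bits carry the indirection level (0 = data
 * pages only, 1 = one page-table level, 2 = two levels).  pg_dir gets
 * the page-table DMA address for indirect layouts, or the first data
 * page for a flat one.
 */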
7048
7049 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
7050         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
7051          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
7052          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
7053          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
7054          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7055
7056 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7057 {
7058         struct hwrm_func_backing_store_cfg_input req = {0};
7059         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7060         struct bnxt_ctx_pg_info *ctx_pg;
7061         u32 req_len = sizeof(req);
7062         __le32 *num_entries;
7063         __le64 *pg_dir;
7064         u32 flags = 0;
7065         u8 *pg_attr;
7066         u32 ena;
7067         int i;
7068
7069         if (!ctx)
7070                 return 0;
7071
7072         if (req_len > bp->hwrm_max_ext_req_len)
7073                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7074         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7075         req.enables = cpu_to_le32(enables);
7076
7077         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7078                 ctx_pg = &ctx->qp_mem;
7079                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
7080                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7081                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7082                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7083                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7084                                       &req.qpc_pg_size_qpc_lvl,
7085                                       &req.qpc_page_dir);
7086         }
7087         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7088                 ctx_pg = &ctx->srq_mem;
7089                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
7090                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7091                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7092                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7093                                       &req.srq_pg_size_srq_lvl,
7094                                       &req.srq_page_dir);
7095         }
7096         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7097                 ctx_pg = &ctx->cq_mem;
7098                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7099                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7100                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7101                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7102                                       &req.cq_page_dir);
7103         }
7104         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7105                 ctx_pg = &ctx->vnic_mem;
7106                 req.vnic_num_vnic_entries =
7107                         cpu_to_le16(ctx->vnic_max_vnic_entries);
7108                 req.vnic_num_ring_table_entries =
7109                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
7110                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7111                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7112                                       &req.vnic_pg_size_vnic_lvl,
7113                                       &req.vnic_page_dir);
7114         }
7115         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7116                 ctx_pg = &ctx->stat_mem;
7117                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7118                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7119                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7120                                       &req.stat_pg_size_stat_lvl,
7121                                       &req.stat_page_dir);
7122         }
7123         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7124                 ctx_pg = &ctx->mrav_mem;
7125                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7126                 if (ctx->mrav_num_entries_units)
7127                         flags |=
7128                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7129                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7130                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7131                                       &req.mrav_pg_size_mrav_lvl,
7132                                       &req.mrav_page_dir);
7133         }
7134         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7135                 ctx_pg = &ctx->tim_mem;
7136                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7137                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7138                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7139                                       &req.tim_pg_size_tim_lvl,
7140                                       &req.tim_page_dir);
7141         }
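        /* The loop below relies on the tqm_sp_* fields being followed
         * contiguously by the per-ring tqm_ring* fields in the request
         * layout: num_entries/pg_attr/pg_dir are stepped as raw pointers
         * while ena shifts through the per-ring TQM enable bits.
         */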
7142         for (i = 0, num_entries = &req.tqm_sp_num_entries,
7143              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7144              pg_dir = &req.tqm_sp_page_dir,
7145              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7146              i < BNXT_MAX_TQM_RINGS;
7147              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7148                 if (!(enables & ena))
7149                         continue;
7150
7151                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7152                 ctx_pg = ctx->tqm_mem[i];
7153                 *num_entries = cpu_to_le32(ctx_pg->entries);
7154                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7155         }
7156         req.flags = cpu_to_le32(flags);
7157         return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
7158 }
7159
7160 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7161                                   struct bnxt_ctx_pg_info *ctx_pg)
7162 {
7163         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7164
7165         rmem->page_size = BNXT_PAGE_SIZE;
7166         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7167         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7168         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7169         if (rmem->depth >= 1)
7170                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7171         return bnxt_alloc_ring(bp, rmem);
7172 }
7173
7174 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7175                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7176                                   u8 depth, struct bnxt_mem_init *mem_init)
7177 {
7178         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7179         int rc;
7180
7181         if (!mem_size)
7182                 return -EINVAL;
7183
7184         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7185         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7186                 ctx_pg->nr_pages = 0;
7187                 return -EINVAL;
7188         }
7189         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7190                 int nr_tbls, i;
7191
7192                 rmem->depth = 2;
7193                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7194                                              GFP_KERNEL);
7195                 if (!ctx_pg->ctx_pg_tbl)
7196                         return -ENOMEM;
7197                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7198                 rmem->nr_pages = nr_tbls;
7199                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7200                 if (rc)
7201                         return rc;
7202                 for (i = 0; i < nr_tbls; i++) {
7203                         struct bnxt_ctx_pg_info *pg_tbl;
7204
7205                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7206                         if (!pg_tbl)
7207                                 return -ENOMEM;
7208                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7209                         rmem = &pg_tbl->ring_mem;
7210                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7211                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7212                         rmem->depth = 1;
7213                         rmem->nr_pages = MAX_CTX_PAGES;
7214                         rmem->mem_init = mem_init;
7215                         if (i == (nr_tbls - 1)) {
7216                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7217
7218                                 if (rem)
7219                                         rmem->nr_pages = rem;
7220                         }
7221                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7222                         if (rc)
7223                                 break;
7224                 }
7225         } else {
7226                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7227                 if (rmem->nr_pages > 1 || depth)
7228                         rmem->depth = 1;
7229                 rmem->mem_init = mem_init;
7230                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7231         }
7232         return rc;
7233 }
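
/* Paging sketch for the indirect case above (illustrative, assuming 4K
 * pages with MAX_CTX_PAGES = 512 PTEs per table page, which is not
 * guaranteed by this file): 2050 data pages need
 * DIV_ROUND_UP(2050, 512) = 5 level-1 tables, the first four full and
 * the last trimmed to the 2-page remainder, exactly as the
 * "i == (nr_tbls - 1)" branch computes.
 */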
7234
7235 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7236                                   struct bnxt_ctx_pg_info *ctx_pg)
7237 {
7238         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7239
7240         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7241             ctx_pg->ctx_pg_tbl) {
7242                 int i, nr_tbls = rmem->nr_pages;
7243
7244                 for (i = 0; i < nr_tbls; i++) {
7245                         struct bnxt_ctx_pg_info *pg_tbl;
7246                         struct bnxt_ring_mem_info *rmem2;
7247
7248                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7249                         if (!pg_tbl)
7250                                 continue;
7251                         rmem2 = &pg_tbl->ring_mem;
7252                         bnxt_free_ring(bp, rmem2);
7253                         ctx_pg->ctx_pg_arr[i] = NULL;
7254                         kfree(pg_tbl);
7255                         ctx_pg->ctx_pg_tbl[i] = NULL;
7256                 }
7257                 kfree(ctx_pg->ctx_pg_tbl);
7258                 ctx_pg->ctx_pg_tbl = NULL;
7259         }
7260         bnxt_free_ring(bp, rmem);
7261         ctx_pg->nr_pages = 0;
7262 }
7263
7264 static void bnxt_free_ctx_mem(struct bnxt *bp)
7265 {
7266         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7267         int i;
7268
7269         if (!ctx)
7270                 return;
7271
7272         if (ctx->tqm_mem[0]) {
7273                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7274                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7275                 kfree(ctx->tqm_mem[0]);
7276                 ctx->tqm_mem[0] = NULL;
7277         }
7278
7279         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7280         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7281         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7282         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7283         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7284         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7285         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7286         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7287 }
7288
7289 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7290 {
7291         struct bnxt_ctx_pg_info *ctx_pg;
7292         struct bnxt_ctx_mem_info *ctx;
7293         struct bnxt_mem_init *init;
7294         u32 mem_size, ena, entries;
7295         u32 entries_sp, min;
7296         u32 num_mr, num_ah;
7297         u32 extra_srqs = 0;
7298         u32 extra_qps = 0;
7299         u8 pg_lvl = 1;
7300         int i, rc;
7301
7302         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7303         if (rc) {
7304                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7305                            rc);
7306                 return rc;
7307         }
7308         ctx = bp->ctx;
7309         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7310                 return 0;
7311
7312         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7313                 pg_lvl = 2;
7314                 extra_qps = 65536;
7315                 extra_srqs = 8192;
7316         }
7317
7318         ctx_pg = &ctx->qp_mem;
7319         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7320                           extra_qps;
7321         if (ctx->qp_entry_size) {
7322                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7323                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7324                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7325                 if (rc)
7326                         return rc;
7327         }
7328
7329         ctx_pg = &ctx->srq_mem;
7330         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7331         if (ctx->srq_entry_size) {
7332                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7333                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7334                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7335                 if (rc)
7336                         return rc;
7337         }
7338
7339         ctx_pg = &ctx->cq_mem;
7340         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7341         if (ctx->cq_entry_size) {
7342                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7343                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7344                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7345                 if (rc)
7346                         return rc;
7347         }
7348
7349         ctx_pg = &ctx->vnic_mem;
7350         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7351                           ctx->vnic_max_ring_table_entries;
7352         if (ctx->vnic_entry_size) {
7353                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7354                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7355                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7356                 if (rc)
7357                         return rc;
7358         }
7359
7360         ctx_pg = &ctx->stat_mem;
7361         ctx_pg->entries = ctx->stat_max_entries;
7362         if (ctx->stat_entry_size) {
7363                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7364                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7365                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7366                 if (rc)
7367                         return rc;
7368         }
7369
7370         ena = 0;
7371         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7372                 goto skip_rdma;
7373
7374         ctx_pg = &ctx->mrav_mem;
7375         /* 128K extra entries are needed to accommodate the static AH
7376          * context allocation by firmware.
7377          */
7378         num_mr = 1024 * 256;
7379         num_ah = 1024 * 128;
7380         ctx_pg->entries = num_mr + num_ah;
7381         if (ctx->mrav_entry_size) {
7382                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7383                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7384                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7385                 if (rc)
7386                         return rc;
7387         }
7388         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7389         if (ctx->mrav_num_entries_units)
7390                 ctx_pg->entries =
7391                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7392                          (num_ah / ctx->mrav_num_entries_units);
7393
7394         ctx_pg = &ctx->tim_mem;
7395         ctx_pg->entries = ctx->qp_mem.entries;
7396         if (ctx->tim_entry_size) {
7397                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7398                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7399                 if (rc)
7400                         return rc;
7401         }
7402         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7403
7404 skip_rdma:
7405         min = ctx->tqm_min_entries_per_ring;
7406         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7407                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7408         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7409         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7410         entries = roundup(entries, ctx->tqm_entries_multiple);
7411         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7412         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7413                 ctx_pg = ctx->tqm_mem[i];
7414                 ctx_pg->entries = i ? entries : entries_sp;
7415                 if (ctx->tqm_entry_size) {
7416                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7417                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7418                                                     NULL);
7419                         if (rc)
7420                                 return rc;
7421                 }
7422                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7423         }
7424         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7425         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7426         if (rc) {
7427                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7428                            rc);
7429                 return rc;
7430         }
7431         ctx->flags |= BNXT_CTX_FLAG_INITED;
7432         return 0;
7433 }
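
/* TQM sizing above: ring 0 is the slow-path ring and gets entries_sp,
 * the remaining fast-path rings get the L2-derived entry count; both are
 * rounded to tqm_entries_multiple and the fast-path count is clamped to
 * [tqm_min_entries_per_ring, tqm_max_entries_per_ring].  When firmware
 * reports mrav_num_entries_units, the MRAV entry count is packed as
 * (MR units << 16) | AH units instead of a flat total.
 */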
7434
7435 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7436 {
7437         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7438         struct hwrm_func_resource_qcaps_input req = {0};
7439         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7440         int rc;
7441
7442         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7443         req.fid = cpu_to_le16(0xffff);
7444
7445         mutex_lock(&bp->hwrm_cmd_lock);
7446         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7447                                        HWRM_CMD_TIMEOUT);
7448         if (rc)
7449                 goto hwrm_func_resc_qcaps_exit;
7450
7451         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7452         if (!all)
7453                 goto hwrm_func_resc_qcaps_exit;
7454
7455         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7456         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7457         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7458         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7459         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7460         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7461         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7462         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7463         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7464         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7465         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7466         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7467         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7468         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7469         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7470         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7471
7472         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7473                 u16 max_msix = le16_to_cpu(resp->max_msix);
7474
7475                 hw_resc->max_nqs = max_msix;
7476                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7477         }
7478
7479         if (BNXT_PF(bp)) {
7480                 struct bnxt_pf_info *pf = &bp->pf;
7481
7482                 pf->vf_resv_strategy =
7483                         le16_to_cpu(resp->vf_reservation_strategy);
7484                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7485                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7486         }
7487 hwrm_func_resc_qcaps_exit:
7488         mutex_unlock(&bp->hwrm_cmd_lock);
7489         return rc;
7490 }
7491
7492 /* bp->hwrm_cmd_lock already held. */
7493 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7494 {
7495         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7496         struct hwrm_port_mac_ptp_qcfg_input req = {0};
7497         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7498         u8 flags;
7499         int rc;
7500
7501         if (bp->hwrm_spec_code < 0x10801) {
7502                 rc = -ENODEV;
7503                 goto no_ptp;
7504         }
7505
7506         req.port_id = cpu_to_le16(bp->pf.port_id);
7507         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
7508         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7509         if (rc)
7510                 goto no_ptp;
7511
7512         flags = resp->flags;
7513         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7514                 rc = -ENODEV;
7515                 goto no_ptp;
7516         }
7517         if (!ptp) {
7518                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7519                 if (!ptp)
7520                         return -ENOMEM;
7521                 ptp->bp = bp;
7522                 bp->ptp_cfg = ptp;
7523         }
7524         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7525                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7526                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7527         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7528                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7529                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7530         } else {
7531                 rc = -ENODEV;
7532                 goto no_ptp;
7533         }
7534         return 0;
7535
7536 no_ptp:
7537         kfree(ptp);
7538         bp->ptp_cfg = NULL;
7539         return rc;
7540 }
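
/* Reference clock selection above: the register pair comes from the
 * response when the PARTIAL_DIRECT_ACCESS_REF_CLOCK flag is set, falls
 * back to the fixed TS0 registers on P5 chips, and anything else means
 * no usable PTP clock, so the just-allocated ptp_cfg is freed again.
 */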
7541
7542 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7543 {
7544         int rc = 0;
7545         struct hwrm_func_qcaps_input req = {0};
7546         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7547         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7548         u32 flags, flags_ext;
7549
7550         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7551         req.fid = cpu_to_le16(0xffff);
7552
7553         mutex_lock(&bp->hwrm_cmd_lock);
7554         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7555         if (rc)
7556                 goto hwrm_func_qcaps_exit;
7557
7558         flags = le32_to_cpu(resp->flags);
7559         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7560                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7561         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7562                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7563         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7564                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7565         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7566                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7567         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7568                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7569         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7570                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7571         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7572                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7573         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7574                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7575
7576         flags_ext = le32_to_cpu(resp->flags_ext);
7577         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7578                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7579
7580         bp->tx_push_thresh = 0;
7581         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7582             BNXT_FW_MAJ(bp) > 217)
7583                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7584
7585         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7586         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7587         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7588         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7589         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7590         if (!hw_resc->max_hw_ring_grps)
7591                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7592         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7593         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7594         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7595
7596         if (BNXT_PF(bp)) {
7597                 struct bnxt_pf_info *pf = &bp->pf;
7598
7599                 pf->fw_fid = le16_to_cpu(resp->fid);
7600                 pf->port_id = le16_to_cpu(resp->port_id);
7601                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7602                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7603                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7604                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7605                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7606                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7607                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7608                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7609                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7610                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7611                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7612                         bp->flags |= BNXT_FLAG_WOL_CAP;
7613                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7614                         __bnxt_hwrm_ptp_qcfg(bp);
7615                 } else {
7616                         kfree(bp->ptp_cfg);
7617                         bp->ptp_cfg = NULL;
7618                 }
7619         } else {
7620 #ifdef CONFIG_BNXT_SRIOV
7621                 struct bnxt_vf_info *vf = &bp->vf;
7622
7623                 vf->fw_fid = le16_to_cpu(resp->fid);
7624                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7625 #endif
7626         }
7627
7628 hwrm_func_qcaps_exit:
7629         mutex_unlock(&bp->hwrm_cmd_lock);
7630         return rc;
7631 }
7632
7633 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7634
7635 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7636 {
7637         int rc;
7638
7639         rc = __bnxt_hwrm_func_qcaps(bp);
7640         if (rc)
7641                 return rc;
7642         rc = bnxt_hwrm_queue_qportcfg(bp);
7643         if (rc) {
7644                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7645                 return rc;
7646         }
7647         if (bp->hwrm_spec_code >= 0x10803) {
7648                 rc = bnxt_alloc_ctx_mem(bp);
7649                 if (rc)
7650                         return rc;
7651                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7652                 if (!rc)
7653                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7654         }
7655         return 0;
7656 }
7657
7658 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7659 {
7660         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7661         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7662         int rc = 0;
7663         u32 flags;
7664
7665         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7666                 return 0;
7667
7668         resp = bp->hwrm_cmd_resp_addr;
7669         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7670
7671         mutex_lock(&bp->hwrm_cmd_lock);
7672         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7673         if (rc)
7674                 goto hwrm_cfa_adv_qcaps_exit;
7675
7676         flags = le32_to_cpu(resp->flags);
7677         if (flags &
7678             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7679                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7680
7681 hwrm_cfa_adv_qcaps_exit:
7682         mutex_unlock(&bp->hwrm_cmd_lock);
7683         return rc;
7684 }
7685
7686 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7687 {
7688         if (bp->fw_health)
7689                 return 0;
7690
7691         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7692         if (!bp->fw_health)
7693                 return -ENOMEM;
7694
7695         return 0;
7696 }
7697
7698 static int bnxt_alloc_fw_health(struct bnxt *bp)
7699 {
7700         int rc;
7701
7702         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7703             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7704                 return 0;
7705
7706         rc = __bnxt_alloc_fw_health(bp);
7707         if (rc) {
7708                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7709                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7710                 return rc;
7711         }
7712
7713         return 0;
7714 }
7715
7716 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7717 {
7718         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7719                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7720                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7721 }
7722
7723 bool bnxt_is_fw_healthy(struct bnxt *bp)
7724 {
7725         if (bp->fw_health && bp->fw_health->status_reliable) {
7726                 u32 fw_status;
7727
7728                 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7729                 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7730                         return false;
7731         }
7732
7733         return true;
7734 }
7735
7736 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7737 {
7738         struct bnxt_fw_health *fw_health = bp->fw_health;
7739         u32 reg_type;
7740
7741         if (!fw_health || !fw_health->status_reliable)
7742                 return;
7743
7744         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7745         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7746                 fw_health->status_reliable = false;
7747 }
7748
7749 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7750 {
7751         void __iomem *hs;
7752         u32 status_loc;
7753         u32 reg_type;
7754         u32 sig;
7755
7756         if (bp->fw_health)
7757                 bp->fw_health->status_reliable = false;
7758
7759         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7760         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7761
7762         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7763         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7764                 if (!bp->chip_num) {
7765                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7766                         bp->chip_num = readl(bp->bar0 +
7767                                              BNXT_FW_HEALTH_WIN_BASE +
7768                                              BNXT_GRC_REG_CHIP_NUM);
7769                 }
7770                 if (!BNXT_CHIP_P5(bp))
7771                         return;
7772
7773                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7774                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7775         } else {
7776                 status_loc = readl(hs + offsetof(struct hcomm_status,
7777                                                  fw_status_loc));
7778         }
7779
7780         if (__bnxt_alloc_fw_health(bp)) {
7781                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7782                 return;
7783         }
7784
7785         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7786         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7787         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7788                 __bnxt_map_fw_health_reg(bp, status_loc);
7789                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7790                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7791         }
7792
7793         bp->fw_health->status_reliable = true;
7794 }
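
/* Discovery order above: probe the shared hcomm_status structure via a
 * GRC window first; if its signature is absent, fall back to the fixed
 * P5 status register (reading the chip number first when it is not yet
 * known), and otherwise give up on firmware health reporting.
 */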
7795
7796 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7797 {
7798         struct bnxt_fw_health *fw_health = bp->fw_health;
7799         u32 reg_base = 0xffffffff;
7800         int i;
7801
7802         bp->fw_health->status_reliable = false;
7803         /* Only pre-map the monitoring GRC registers using window 3 */
7804         for (i = 0; i < 4; i++) {
7805                 u32 reg = fw_health->regs[i];
7806
7807                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7808                         continue;
7809                 if (reg_base == 0xffffffff)
7810                         reg_base = reg & BNXT_GRC_BASE_MASK;
7811                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7812                         return -ERANGE;
7813                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7814         }
7815         bp->fw_health->status_reliable = true;
7816         if (reg_base == 0xffffffff)
7817                 return 0;
7818
7819         __bnxt_map_fw_health_reg(bp, reg_base);
7820         return 0;
7821 }
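
/* All GRC-type health registers must share one window-sized base: only
 * a single GRC window is re-pointed here, so a register outside the
 * common base fails with -ERANGE, while BAR0-type registers are left
 * untouched since they need no window mapping.
 */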
7822
7823 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7824 {
7825         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7826         struct bnxt_fw_health *fw_health = bp->fw_health;
7827         struct hwrm_error_recovery_qcfg_input req = {0};
7828         int rc, i;
7829
7830         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7831                 return 0;
7832
7833         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7834         mutex_lock(&bp->hwrm_cmd_lock);
7835         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7836         if (rc)
7837                 goto err_recovery_out;
7838         fw_health->flags = le32_to_cpu(resp->flags);
7839         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7840             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7841                 rc = -EINVAL;
7842                 goto err_recovery_out;
7843         }
7844         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7845         fw_health->master_func_wait_dsecs =
7846                 le32_to_cpu(resp->master_func_wait_period);
7847         fw_health->normal_func_wait_dsecs =
7848                 le32_to_cpu(resp->normal_func_wait_period);
7849         fw_health->post_reset_wait_dsecs =
7850                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7851         fw_health->post_reset_max_wait_dsecs =
7852                 le32_to_cpu(resp->max_bailout_time_after_reset);
7853         fw_health->regs[BNXT_FW_HEALTH_REG] =
7854                 le32_to_cpu(resp->fw_health_status_reg);
7855         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7856                 le32_to_cpu(resp->fw_heartbeat_reg);
7857         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7858                 le32_to_cpu(resp->fw_reset_cnt_reg);
7859         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7860                 le32_to_cpu(resp->reset_inprogress_reg);
7861         fw_health->fw_reset_inprog_reg_mask =
7862                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7863         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7864         if (fw_health->fw_reset_seq_cnt >= 16) {
7865                 rc = -EINVAL;
7866                 goto err_recovery_out;
7867         }
7868         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7869                 fw_health->fw_reset_seq_regs[i] =
7870                         le32_to_cpu(resp->reset_reg[i]);
7871                 fw_health->fw_reset_seq_vals[i] =
7872                         le32_to_cpu(resp->reset_reg_val[i]);
7873                 fw_health->fw_reset_seq_delay_msec[i] =
7874                         resp->delay_after_reset[i];
7875         }
7876 err_recovery_out:
7877         mutex_unlock(&bp->hwrm_cmd_lock);
7878         if (!rc)
7879                 rc = bnxt_map_fw_health_regs(bp);
7880         if (rc)
7881                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7882         return rc;
7883 }
7884
7885 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7886 {
7887         struct hwrm_func_reset_input req = {0};
7888
7889         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7890         req.enables = 0;
7891
7892         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7893 }
7894
7895 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7896 {
7897         struct hwrm_nvm_get_dev_info_output nvm_info;
7898
7899         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7900                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7901                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7902                          nvm_info.nvm_cfg_ver_upd);
7903 }
7904
7905 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7906 {
7907         int rc = 0;
7908         struct hwrm_queue_qportcfg_input req = {0};
7909         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7910         u8 i, j, *qptr;
7911         bool no_rdma;
7912
7913         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7914
7915         mutex_lock(&bp->hwrm_cmd_lock);
7916         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7917         if (rc)
7918                 goto qportcfg_exit;
7919
7920         if (!resp->max_configurable_queues) {
7921                 rc = -EINVAL;
7922                 goto qportcfg_exit;
7923         }
7924         bp->max_tc = resp->max_configurable_queues;
7925         bp->max_lltc = resp->max_configurable_lossless_queues;
7926         if (bp->max_tc > BNXT_MAX_QUEUE)
7927                 bp->max_tc = BNXT_MAX_QUEUE;
7928
7929         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7930         qptr = &resp->queue_id0;
7931         for (i = 0, j = 0; i < bp->max_tc; i++) {
7932                 bp->q_info[j].queue_id = *qptr;
7933                 bp->q_ids[i] = *qptr++;
7934                 bp->q_info[j].queue_profile = *qptr++;
7935                 bp->tc_to_qidx[j] = j;
7936                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7937                     (no_rdma && BNXT_PF(bp)))
7938                         j++;
7939         }
7940         bp->max_q = bp->max_tc;
7941         bp->max_tc = max_t(u8, j, 1);
7942
7943         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7944                 bp->max_tc = 1;
7945
7946         if (bp->max_lltc > bp->max_tc)
7947                 bp->max_lltc = bp->max_tc;
7948
7949 qportcfg_exit:
7950         mutex_unlock(&bp->hwrm_cmd_lock);
7951         return rc;
7952 }
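
/* The response packs (queue_id, queue_profile) byte pairs starting at
 * queue_id0, which is why qptr post-increments twice per iteration.
 * CNP queues only get a TC slot when RoCE is unsupported on a PF;
 * otherwise they are skipped.
 */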
7953
7954 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7955 {
7956         struct hwrm_ver_get_input req = {0};
7957         int rc;
7958
7959         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7960         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7961         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7962         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7963
7964         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7965                                    silent);
7966         return rc;
7967 }
7968
7969 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7970 {
7971         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7972         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7973         u32 dev_caps_cfg, hwrm_ver;
7974         int rc, len;
7975
7976         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7977         mutex_lock(&bp->hwrm_cmd_lock);
7978         rc = __bnxt_hwrm_ver_get(bp, false);
7979         if (rc)
7980                 goto hwrm_ver_get_exit;
7981
7982         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7983
7984         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7985                              resp->hwrm_intf_min_8b << 8 |
7986                              resp->hwrm_intf_upd_8b;
7987         if (resp->hwrm_intf_maj_8b < 1) {
7988                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7989                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7990                             resp->hwrm_intf_upd_8b);
7991                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7992         }
7993
7994         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7995                         HWRM_VERSION_UPDATE;
7996
7997         if (bp->hwrm_spec_code > hwrm_ver)
7998                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7999                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8000                          HWRM_VERSION_UPDATE);
8001         else
8002                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8003                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8004                          resp->hwrm_intf_upd_8b);
8005
8006         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8007         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8008                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8009                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8010                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8011                 len = FW_VER_STR_LEN;
8012         } else {
8013                 fw_maj = resp->hwrm_fw_maj_8b;
8014                 fw_min = resp->hwrm_fw_min_8b;
8015                 fw_bld = resp->hwrm_fw_bld_8b;
8016                 fw_rsv = resp->hwrm_fw_rsvd_8b;
8017                 len = BC_HWRM_STR_LEN;
8018         }
8019         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8020         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8021                  fw_rsv);
8022
8023         if (strlen(resp->active_pkg_name)) {
8024                 int fw_ver_len = strlen(bp->fw_ver_str);
8025
8026                 snprintf(bp->fw_ver_str + fw_ver_len,
8027                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8028                          resp->active_pkg_name);
8029                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8030         }
8031
8032         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8033         if (!bp->hwrm_cmd_timeout)
8034                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8035
8036         if (resp->hwrm_intf_maj_8b >= 1) {
8037                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8038                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8039         }
8040         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8041                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8042
8043         bp->chip_num = le16_to_cpu(resp->chip_num);
8044         bp->chip_rev = resp->chip_rev;
8045         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8046             !resp->chip_metal)
8047                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8048
8049         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8050         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8051             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8052                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8053
8054         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8055                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8056
8057         if (dev_caps_cfg &
8058             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8059                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8060
8061         if (dev_caps_cfg &
8062             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8063                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8064
8065         if (dev_caps_cfg &
8066             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8067                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8068
8069 hwrm_ver_get_exit:
8070         mutex_unlock(&bp->hwrm_cmd_lock);
8071         return rc;
8072 }
8073
8074 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8075 {
8076         struct hwrm_fw_set_time_input req = {0};
8077         struct tm tm;
8078         time64_t now = ktime_get_real_seconds();
8079
8080         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8081             bp->hwrm_spec_code < 0x10400)
8082                 return -EOPNOTSUPP;
8083
8084         time64_to_tm(now, 0, &tm);
8085         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
8086         req.year = cpu_to_le16(1900 + tm.tm_year);
8087         req.month = 1 + tm.tm_mon;
8088         req.day = tm.tm_mday;
8089         req.hour = tm.tm_hour;
8090         req.minute = tm.tm_min;
8091         req.second = tm.tm_sec;
8092         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8093 }
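
/* For example, now = 1609459200 (2021-01-01 00:00:00 UTC) yields
 * tm_year = 121, tm_mon = 0 and tm_mday = 1 from time64_to_tm(), so the
 * request carries year = 2021, month = 1, day = 1.
 */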
8094
8095 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8096 {
8097         u64 sw_tmp;
8098
8099         hw &= mask;
8100         sw_tmp = (*sw & ~mask) | hw;
8101         if (hw < (*sw & mask))
8102                 sw_tmp += mask + 1;
8103         WRITE_ONCE(*sw, sw_tmp);
8104 }
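
/* Worked rollover example (illustrative 48-bit counter, mask =
 * 0xffffffffffff): if *sw = 0x1fffffffffffe and the next hardware
 * snapshot wraps to hw = 0x1, then hw < (*sw & mask), so
 * sw_tmp = 0x1000000000001 + (mask + 1) = 0x2000000000001 and the
 * 64-bit software counter keeps counting across the hardware wrap.
 */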
8105
8106 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8107                                     int count, bool ignore_zero)
8108 {
8109         int i;
8110
8111         for (i = 0; i < count; i++) {
8112                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8113
8114                 if (ignore_zero && !hw)
8115                         continue;
8116
8117                 if (masks[i] == -1ULL)
8118                         sw_stats[i] = hw;
8119                 else
8120                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8121         }
8122 }
8123
8124 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8125 {
8126         if (!stats->hw_stats)
8127                 return;
8128
8129         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8130                                 stats->hw_masks, stats->len / 8, false);
8131 }
8132
8133 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8134 {
8135         struct bnxt_stats_mem *ring0_stats;
8136         bool ignore_zero = false;
8137         int i;
8138
8139         /* Chip bug.  Counter intermittently becomes 0. */
8140         if (bp->flags & BNXT_FLAG_CHIP_P5)
8141                 ignore_zero = true;
8142
8143         for (i = 0; i < bp->cp_nr_rings; i++) {
8144                 struct bnxt_napi *bnapi = bp->bnapi[i];
8145                 struct bnxt_cp_ring_info *cpr;
8146                 struct bnxt_stats_mem *stats;
8147
8148                 cpr = &bnapi->cp_ring;
8149                 stats = &cpr->stats;
8150                 if (!i)
8151                         ring0_stats = stats;
8152                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8153                                         ring0_stats->hw_masks,
8154                                         ring0_stats->len / 8, ignore_zero);
8155         }
8156         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8157                 struct bnxt_stats_mem *stats = &bp->port_stats;
8158                 __le64 *hw_stats = stats->hw_stats;
8159                 u64 *sw_stats = stats->sw_stats;
8160                 u64 *masks = stats->hw_masks;
8161                 int cnt;
8162
8163                 cnt = sizeof(struct rx_port_stats) / 8;
8164                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8165
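                /* hw_stats/sw_stats/masks are u64 pointers, so dividing the
                 * byte offset by 8 advances them in qword units from the rx
                 * block to the tx block of the port stats buffer.
                 */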
8166                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8167                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8168                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8169                 cnt = sizeof(struct tx_port_stats) / 8;
8170                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8171         }
8172         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8173                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8174                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8175         }
8176 }
8177
8178 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8179 {
8180         struct bnxt_pf_info *pf = &bp->pf;
8181         struct hwrm_port_qstats_input req = {0};
8182
8183         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8184                 return 0;
8185
8186         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8187                 return -EOPNOTSUPP;
8188
8189         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8190         req.flags = flags;
8191         req.port_id = cpu_to_le16(pf->port_id);
8192         req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8193                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8194         req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8195         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8196 }
8197
8198 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8199 {
8200         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
8201         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
8202         struct hwrm_port_qstats_ext_input req = {0};
8203         struct bnxt_pf_info *pf = &bp->pf;
8204         u32 tx_stat_size;
8205         int rc;
8206
8207         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8208                 return 0;
8209
8210         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8211                 return -EOPNOTSUPP;
8212
8213         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
8214         req.flags = flags;
8215         req.port_id = cpu_to_le16(pf->port_id);
8216         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8217         req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8218         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8219                        sizeof(struct tx_port_stats_ext) : 0;
8220         req.tx_stat_size = cpu_to_le16(tx_stat_size);
8221         req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8222         mutex_lock(&bp->hwrm_cmd_lock);
8223         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8224         if (!rc) {
8225                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
8226                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8227                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
8228         } else {
8229                 bp->fw_rx_stats_ext_size = 0;
8230                 bp->fw_tx_stats_ext_size = 0;
8231         }
8232         if (flags)
8233                 goto qstats_done;
8234
8235         if (bp->fw_tx_stats_ext_size <=
8236             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8237                 mutex_unlock(&bp->hwrm_cmd_lock);
8238                 bp->pri2cos_valid = 0;
8239                 return rc;
8240         }
8241
8242         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8243         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8244
8245         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8246         if (!rc) {
8247                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8248                 u8 *pri2cos;
8249                 int i, j;
8250
8251                 resp2 = bp->hwrm_cmd_resp_addr;
8252                 pri2cos = &resp2->pri0_cos_queue_id;
8253                 for (i = 0; i < 8; i++) {
8254                         u8 queue_id = pri2cos[i];
8255                         u8 queue_idx;
8256
8257                         /* Per port queue IDs start from 0, 10, 20, etc */
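                        /* e.g. queue_id 23 yields per-port queue_idx 3 */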
8258                         queue_idx = queue_id % 10;
8259                         if (queue_idx > BNXT_MAX_QUEUE) {
8260                                 bp->pri2cos_valid = false;
8261                                 goto qstats_done;
8262                         }
8263                         for (j = 0; j < bp->max_q; j++) {
8264                                 if (bp->q_ids[j] == queue_id)
8265                                         bp->pri2cos_idx[i] = queue_idx;
8266                         }
8267                 }
8268                 bp->pri2cos_valid = 1;
8269         }
8270 qstats_done:
8271         mutex_unlock(&bp->hwrm_cmd_lock);
8272         return rc;
8273 }
8274
8275 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8276 {
8277         if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8278                 bnxt_hwrm_tunnel_dst_port_free(
8279                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8280         if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8281                 bnxt_hwrm_tunnel_dst_port_free(
8282                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8283 }
8284
8285 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8286 {
8287         int rc, i;
8288         u32 tpa_flags = 0;
8289
8290         if (set_tpa)
8291                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8292         else if (BNXT_NO_FW_ACCESS(bp))
8293                 return 0;
8294         for (i = 0; i < bp->nr_vnics; i++) {
8295                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8296                 if (rc) {
8297                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8298                                    i, rc);
8299                         return rc;
8300                 }
8301         }
8302         return 0;
8303 }
8304
8305 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8306 {
8307         int i;
8308
8309         for (i = 0; i < bp->nr_vnics; i++)
8310                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8311 }
8312
8313 static void bnxt_clear_vnic(struct bnxt *bp)
8314 {
8315         if (!bp->vnic_info)
8316                 return;
8317
8318         bnxt_hwrm_clear_vnic_filter(bp);
8319         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8320                 /* clear all RSS settings before freeing the vnic ctx */
8321                 bnxt_hwrm_clear_vnic_rss(bp);
8322                 bnxt_hwrm_vnic_ctx_free(bp);
8323         }
8324         /* before freeing the vnic, undo the vnic TPA settings */
8325         if (bp->flags & BNXT_FLAG_TPA)
8326                 bnxt_set_tpa(bp, false);
8327         bnxt_hwrm_vnic_free(bp);
8328         if (bp->flags & BNXT_FLAG_CHIP_P5)
8329                 bnxt_hwrm_vnic_ctx_free(bp);
8330 }
8331
8332 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8333                                     bool irq_re_init)
8334 {
8335         bnxt_clear_vnic(bp);
8336         bnxt_hwrm_ring_free(bp, close_path);
8337         bnxt_hwrm_ring_grp_free(bp);
8338         if (irq_re_init) {
8339                 bnxt_hwrm_stat_ctx_free(bp);
8340                 bnxt_hwrm_free_tunnel_ports(bp);
8341         }
8342 }
8343
8344 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8345 {
8346         struct hwrm_func_cfg_input req = {0};
8347
8348         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8349         req.fid = cpu_to_le16(0xffff);
8350         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8351         if (br_mode == BRIDGE_MODE_VEB)
8352                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8353         else if (br_mode == BRIDGE_MODE_VEPA)
8354                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8355         else
8356                 return -EINVAL;
8357         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8358 }
8359
8360 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8361 {
8362         struct hwrm_func_cfg_input req = {0};
8363
8364         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8365                 return 0;
8366
8367         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8368         req.fid = cpu_to_le16(0xffff);
8369         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8370         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8371         if (size == 128)
8372                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8373
8374         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8375 }
8376
8377 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8378 {
8379         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8380         int rc;
8381
8382         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8383                 goto skip_rss_ctx;
8384
8385         /* allocate context for vnic */
8386         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8387         if (rc) {
8388                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8389                            vnic_id, rc);
8390                 goto vnic_setup_err;
8391         }
8392         bp->rsscos_nr_ctxs++;
8393
8394         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8395                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8396                 if (rc) {
8397                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8398                                    vnic_id, rc);
8399                         goto vnic_setup_err;
8400                 }
8401                 bp->rsscos_nr_ctxs++;
8402         }
8403
8404 skip_rss_ctx:
8405         /* configure default vnic, ring grp */
8406         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8407         if (rc) {
8408                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8409                            vnic_id, rc);
8410                 goto vnic_setup_err;
8411         }
8412
8413         /* Enable RSS hashing on vnic */
8414         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8415         if (rc) {
8416                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8417                            vnic_id, rc);
8418                 goto vnic_setup_err;
8419         }
8420
8421         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8422                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8423                 if (rc) {
8424                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8425                                    vnic_id, rc);
8426                 }
8427         }
8428
8429 vnic_setup_err:
8430         return rc;
8431 }
8432
8433 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8434 {
8435         int rc, i, nr_ctxs;
8436
8437         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8438         for (i = 0; i < nr_ctxs; i++) {
8439                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8440                 if (rc) {
8441                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8442                                    vnic_id, i, rc);
8443                         break;
8444                 }
8445                 bp->rsscos_nr_ctxs++;
8446         }
8447         if (i < nr_ctxs)
8448                 return -ENOMEM;
8449
8450         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8451         if (rc) {
8452                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8453                            vnic_id, rc);
8454                 return rc;
8455         }
8456         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8457         if (rc) {
8458                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8459                            vnic_id, rc);
8460                 return rc;
8461         }
8462         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8463                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8464                 if (rc) {
8465                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8466                                    vnic_id, rc);
8467                 }
8468         }
8469         return rc;
8470 }
8471
8472 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8473 {
8474         if (bp->flags & BNXT_FLAG_CHIP_P5)
8475                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8476         else
8477                 return __bnxt_setup_vnic(bp, vnic_id);
8478 }
8479
8480 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8481 {
8482 #ifdef CONFIG_RFS_ACCEL
8483         int i, rc = 0;
8484
8485         if (bp->flags & BNXT_FLAG_CHIP_P5)
8486                 return 0;
8487
8488         for (i = 0; i < bp->rx_nr_rings; i++) {
8489                 struct bnxt_vnic_info *vnic;
8490                 u16 vnic_id = i + 1;
8491                 u16 ring_id = i;
8492
8493                 if (vnic_id >= bp->nr_vnics)
8494                         break;
8495
8496                 vnic = &bp->vnic_info[vnic_id];
8497                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8498                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8499                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8500                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8501                 if (rc) {
8502                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8503                                    vnic_id, rc);
8504                         break;
8505                 }
8506                 rc = bnxt_setup_vnic(bp, vnic_id);
8507                 if (rc)
8508                         break;
8509         }
8510         return rc;
8511 #else
8512         return 0;
8513 #endif
8514 }
8515
8516 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8517 static bool bnxt_promisc_ok(struct bnxt *bp)
8518 {
8519 #ifdef CONFIG_BNXT_SRIOV
8520         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8521                 return false;
8522 #endif
8523         return true;
8524 }
8525
8526 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8527 {
8528         int rc;
8529
8530         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8531         if (rc) {
8532                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8533                            rc);
8534                 return rc;
8535         }
8536
8537         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8538         if (rc) {
8539                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8540                            rc);
8541                 return rc;
8542         }
8543         return rc;
8544 }
8545
8546 static int bnxt_cfg_rx_mode(struct bnxt *);
8547 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8548
8549 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8550 {
8551         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8552         int rc = 0;
8553         unsigned int rx_nr_rings = bp->rx_nr_rings;
8554
8555         if (irq_re_init) {
8556                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8557                 if (rc) {
8558                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8559                                    rc);
8560                         goto err_out;
8561                 }
8562         }
8563
8564         rc = bnxt_hwrm_ring_alloc(bp);
8565         if (rc) {
8566                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8567                 goto err_out;
8568         }
8569
8570         rc = bnxt_hwrm_ring_grp_alloc(bp);
8571         if (rc) {
8572                 netdev_err(bp->dev, "hwrm ring grp alloc failure rc: %x\n", rc);
8573                 goto err_out;
8574         }
8575
8576         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8577                 rx_nr_rings--;
8578
8579         /* default vnic 0 */
8580         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8581         if (rc) {
8582                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8583                 goto err_out;
8584         }
8585
8586         rc = bnxt_setup_vnic(bp, 0);
8587         if (rc)
8588                 goto err_out;
8589
8590         if (bp->flags & BNXT_FLAG_RFS) {
8591                 rc = bnxt_alloc_rfs_vnics(bp);
8592                 if (rc)
8593                         goto err_out;
8594         }
8595
8596         if (bp->flags & BNXT_FLAG_TPA) {
8597                 rc = bnxt_set_tpa(bp, true);
8598                 if (rc)
8599                         goto err_out;
8600         }
8601
8602         if (BNXT_VF(bp))
8603                 bnxt_update_vf_mac(bp);
8604
8605         /* Filter for default vnic 0 */
8606         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8607         if (rc) {
8608                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8609                 goto err_out;
8610         }
8611         vnic->uc_filter_count = 1;
8612
8613         vnic->rx_mask = 0;
8614         if (bp->dev->flags & IFF_BROADCAST)
8615                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8616
8617         if (bp->dev->flags & IFF_PROMISC)
8618                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8619
8620         if (bp->dev->flags & IFF_ALLMULTI) {
8621                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8622                 vnic->mc_list_count = 0;
8623         } else {
8624                 u32 mask = 0;
8625
8626                 bnxt_mc_list_updated(bp, &mask);
8627                 vnic->rx_mask |= mask;
8628         }
8629
8630         rc = bnxt_cfg_rx_mode(bp);
8631         if (rc)
8632                 goto err_out;
8633
8634         rc = bnxt_hwrm_set_coal(bp);
8635         if (rc)
8636                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8637                                 rc);
8638
8639         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8640                 rc = bnxt_setup_nitroa0_vnic(bp);
8641                 if (rc)
8642                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8643                                    rc);
8644         }
8645
8646         if (BNXT_VF(bp)) {
8647                 bnxt_hwrm_func_qcfg(bp);
8648                 netdev_update_features(bp->dev);
8649         }
8650
8651         return 0;
8652
8653 err_out:
8654         bnxt_hwrm_resource_free(bp, 0, true);
8655
8656         return rc;
8657 }
8658
8659 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8660 {
8661         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8662         return 0;
8663 }
8664
8665 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8666 {
8667         bnxt_init_cp_rings(bp);
8668         bnxt_init_rx_rings(bp);
8669         bnxt_init_tx_rings(bp);
8670         bnxt_init_ring_grps(bp, irq_re_init);
8671         bnxt_init_vnics(bp);
8672
8673         return bnxt_init_chip(bp, irq_re_init);
8674 }
8675
8676 static int bnxt_set_real_num_queues(struct bnxt *bp)
8677 {
8678         int rc;
8679         struct net_device *dev = bp->dev;
8680
8681         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8682                                           bp->tx_nr_rings_xdp);
8683         if (rc)
8684                 return rc;
8685
8686         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8687         if (rc)
8688                 return rc;
8689
8690 #ifdef CONFIG_RFS_ACCEL
8691         if (bp->flags & BNXT_FLAG_RFS)
8692                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8693 #endif
8694
8695         return rc;
8696 }
8697
8698 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8699                            bool shared)
8700 {
8701         int _rx = *rx, _tx = *tx;
8702
8703         if (shared) {
8704                 *rx = min_t(int, _rx, max);
8705                 *tx = min_t(int, _tx, max);
8706         } else {
8707                 if (max < 2)
8708                         return -ENOMEM;
8709
8710                 while (_rx + _tx > max) {
8711                         if (_rx > _tx && _rx > 1)
8712                                 _rx--;
8713                         else if (_tx > 1)
8714                                 _tx--;
8715                 }
8716                 *rx = _rx;
8717                 *tx = _tx;
8718         }
8719         return 0;
8720 }
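
/* Illustrative trims: with *rx = 6, *tx = 6 and max = 8, shared rings are
 * simply clamped to max (no change here), while dedicated rings shrink the
 * larger side one ring at a time until rx + tx <= max, ending at 4 + 4.
 */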
8721
8722 static void bnxt_setup_msix(struct bnxt *bp)
8723 {
8724         const int len = sizeof(bp->irq_tbl[0].name);
8725         struct net_device *dev = bp->dev;
8726         int tcs, i;
8727
8728         tcs = netdev_get_num_tc(dev);
8729         if (tcs) {
8730                 int i, off, count;
8731
8732                 for (i = 0; i < tcs; i++) {
8733                         count = bp->tx_nr_rings_per_tc;
8734                         off = i * count;
8735                         netdev_set_tc_queue(dev, i, count, off);
8736                 }
8737         }
8738
8739         for (i = 0; i < bp->cp_nr_rings; i++) {
8740                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8741                 char *attr;
8742
8743                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8744                         attr = "TxRx";
8745                 else if (i < bp->rx_nr_rings)
8746                         attr = "rx";
8747                 else
8748                         attr = "tx";
8749
8750                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8751                          attr, i);
8752                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8753         }
8754 }
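
/* With shared rings the vectors are named, e.g., "eth0-TxRx-0",
 * "eth0-TxRx-1", ...; with dedicated rings the first rx_nr_rings entries
 * are "eth0-rx-<i>" and the remainder "eth0-tx-<i>" ("eth0" is just an
 * illustrative netdev name).
 */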
8755
8756 static void bnxt_setup_inta(struct bnxt *bp)
8757 {
8758         const int len = sizeof(bp->irq_tbl[0].name);
8759
8760         if (netdev_get_num_tc(bp->dev))
8761                 netdev_reset_tc(bp->dev);
8762
8763         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8764                  0);
8765         bp->irq_tbl[0].handler = bnxt_inta;
8766 }
8767
8768 static int bnxt_init_int_mode(struct bnxt *bp);
8769
8770 static int bnxt_setup_int_mode(struct bnxt *bp)
8771 {
8772         int rc;
8773
8774         if (!bp->irq_tbl) {
8775                 rc = bnxt_init_int_mode(bp);
8776                 if (rc || !bp->irq_tbl)
8777                         return rc ?: -ENODEV;
8778         }
8779
8780         if (bp->flags & BNXT_FLAG_USING_MSIX)
8781                 bnxt_setup_msix(bp);
8782         else
8783                 bnxt_setup_inta(bp);
8784
8785         rc = bnxt_set_real_num_queues(bp);
8786         return rc;
8787 }
8788
8789 #ifdef CONFIG_RFS_ACCEL
8790 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8791 {
8792         return bp->hw_resc.max_rsscos_ctxs;
8793 }
8794
8795 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8796 {
8797         return bp->hw_resc.max_vnics;
8798 }
8799 #endif
8800
8801 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8802 {
8803         return bp->hw_resc.max_stat_ctxs;
8804 }
8805
8806 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8807 {
8808         return bp->hw_resc.max_cp_rings;
8809 }
8810
8811 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8812 {
8813         unsigned int cp = bp->hw_resc.max_cp_rings;
8814
8815         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8816                 cp -= bnxt_get_ulp_msix_num(bp);
8817
8818         return cp;
8819 }
8820
8821 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8822 {
8823         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8824
8825         if (bp->flags & BNXT_FLAG_CHIP_P5)
8826                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8827
8828         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8829 }
8830
8831 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8832 {
8833         bp->hw_resc.max_irqs = max_irqs;
8834 }
8835
8836 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8837 {
8838         unsigned int cp;
8839
8840         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8841         if (bp->flags & BNXT_FLAG_CHIP_P5)
8842                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8843         else
8844                 return cp - bp->cp_nr_rings;
8845 }
8846
8847 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8848 {
8849         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8850 }
8851
8852 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8853 {
8854         int max_cp = bnxt_get_max_func_cp_rings(bp);
8855         int max_irq = bnxt_get_max_func_irqs(bp);
8856         int total_req = bp->cp_nr_rings + num;
8857         int max_idx, avail_msix;
8858
8859         max_idx = bp->total_irqs;
8860         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8861                 max_idx = min_t(int, bp->total_irqs, max_cp);
8862         avail_msix = max_idx - bp->cp_nr_rings;
8863         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8864                 return avail_msix;
8865
8866         if (max_irq < total_req) {
8867                 num = max_irq - bp->cp_nr_rings;
8868                 if (num <= 0)
8869                         return 0;
8870         }
8871         return num;
8872 }
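
/* Hypothetical numbers for the trim above: on a P5/NEW_RM device with
 * total_irqs = 16 and cp_nr_rings = 10, avail_msix = 6; a request for
 * num = 8 when max_func_irqs = 14 is therefore trimmed to 14 - 10 = 4.
 */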
8873
8874 static int bnxt_get_num_msix(struct bnxt *bp)
8875 {
8876         if (!BNXT_NEW_RM(bp))
8877                 return bnxt_get_max_func_irqs(bp);
8878
8879         return bnxt_nq_rings_in_use(bp);
8880 }
8881
8882 static int bnxt_init_msix(struct bnxt *bp)
8883 {
8884         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8885         struct msix_entry *msix_ent;
8886
8887         total_vecs = bnxt_get_num_msix(bp);
8888         max = bnxt_get_max_func_irqs(bp);
8889         if (total_vecs > max)
8890                 total_vecs = max;
8891
8892         if (!total_vecs)
8893                 return 0;
8894
8895         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8896         if (!msix_ent)
8897                 return -ENOMEM;
8898
8899         for (i = 0; i < total_vecs; i++) {
8900                 msix_ent[i].entry = i;
8901                 msix_ent[i].vector = 0;
8902         }
8903
8904         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8905                 min = 2;
8906
8907         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8908         ulp_msix = bnxt_get_ulp_msix_num(bp);
8909         if (total_vecs < 0 || total_vecs < ulp_msix) {
8910                 rc = -ENODEV;
8911                 goto msix_setup_exit;
8912         }
8913
8914         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8915         if (bp->irq_tbl) {
8916                 for (i = 0; i < total_vecs; i++)
8917                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8918
8919                 bp->total_irqs = total_vecs;
8920                 /* Trim rings based on the number of vectors allocated */
8921                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8922                                      total_vecs - ulp_msix, min == 1);
8923                 if (rc)
8924                         goto msix_setup_exit;
8925
8926                 bp->cp_nr_rings = (min == 1) ?
8927                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8928                                   bp->tx_nr_rings + bp->rx_nr_rings;
8929
8930         } else {
8931                 rc = -ENOMEM;
8932                 goto msix_setup_exit;
8933         }
8934         bp->flags |= BNXT_FLAG_USING_MSIX;
8935         kfree(msix_ent);
8936         return 0;
8937
8938 msix_setup_exit:
8939         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8940         kfree(bp->irq_tbl);
8941         bp->irq_tbl = NULL;
8942         pci_disable_msix(bp->pdev);
8943         kfree(msix_ent);
8944         return rc;
8945 }
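
/* Sketch of the ring math above (illustrative): with rx = 4 and tx = 4,
 * shared completion rings (min == 1) give cp_nr_rings = max(4, 4) = 4,
 * while dedicated rings give 4 + 4 = 8, which is why min is raised to 2
 * so that at least one rx and one tx vector can be granted.
 */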
8946
8947 static int bnxt_init_inta(struct bnxt *bp)
8948 {
8949         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8950         if (!bp->irq_tbl)
8951                 return -ENOMEM;
8952
8953         bp->total_irqs = 1;
8954         bp->rx_nr_rings = 1;
8955         bp->tx_nr_rings = 1;
8956         bp->cp_nr_rings = 1;
8957         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8958         bp->irq_tbl[0].vector = bp->pdev->irq;
8959         return 0;
8960 }
8961
8962 static int bnxt_init_int_mode(struct bnxt *bp)
8963 {
8964         int rc = -ENODEV;
8965
8966         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8967                 rc = bnxt_init_msix(bp);
8968
8969         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8970                 /* fallback to INTA */
8971                 rc = bnxt_init_inta(bp);
8972         }
8973         return rc;
8974 }
8975
8976 static void bnxt_clear_int_mode(struct bnxt *bp)
8977 {
8978         if (bp->flags & BNXT_FLAG_USING_MSIX)
8979                 pci_disable_msix(bp->pdev);
8980
8981         kfree(bp->irq_tbl);
8982         bp->irq_tbl = NULL;
8983         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8984 }
8985
8986 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8987 {
8988         int tcs = netdev_get_num_tc(bp->dev);
8989         bool irq_cleared = false;
8990         int rc;
8991
8992         if (!bnxt_need_reserve_rings(bp))
8993                 return 0;
8994
8995         if (irq_re_init && BNXT_NEW_RM(bp) &&
8996             bnxt_get_num_msix(bp) != bp->total_irqs) {
8997                 bnxt_ulp_irq_stop(bp);
8998                 bnxt_clear_int_mode(bp);
8999                 irq_cleared = true;
9000         }
9001         rc = __bnxt_reserve_rings(bp);
9002         if (irq_cleared) {
9003                 if (!rc)
9004                         rc = bnxt_init_int_mode(bp);
9005                 bnxt_ulp_irq_restart(bp, rc);
9006         }
9007         if (rc) {
9008                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9009                 return rc;
9010         }
9011         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9012                 netdev_err(bp->dev, "tx ring reservation failure\n");
9013                 netdev_reset_tc(bp->dev);
9014                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9015                 return -ENOMEM;
9016         }
9017         return 0;
9018 }
9019
9020 static void bnxt_free_irq(struct bnxt *bp)
9021 {
9022         struct bnxt_irq *irq;
9023         int i;
9024
9025 #ifdef CONFIG_RFS_ACCEL
9026         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9027         bp->dev->rx_cpu_rmap = NULL;
9028 #endif
9029         if (!bp->irq_tbl || !bp->bnapi)
9030                 return;
9031
9032         for (i = 0; i < bp->cp_nr_rings; i++) {
9033                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9034
9035                 irq = &bp->irq_tbl[map_idx];
9036                 if (irq->requested) {
9037                         if (irq->have_cpumask) {
9038                                 irq_set_affinity_hint(irq->vector, NULL);
9039                                 free_cpumask_var(irq->cpu_mask);
9040                                 irq->have_cpumask = 0;
9041                         }
9042                         free_irq(irq->vector, bp->bnapi[i]);
9043                 }
9044
9045                 irq->requested = 0;
9046         }
9047 }
9048
9049 static int bnxt_request_irq(struct bnxt *bp)
9050 {
9051         int i, j, rc = 0;
9052         unsigned long flags = 0;
9053 #ifdef CONFIG_RFS_ACCEL
9054         struct cpu_rmap *rmap;
9055 #endif
9056
9057         rc = bnxt_setup_int_mode(bp);
9058         if (rc) {
9059                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9060                            rc);
9061                 return rc;
9062         }
9063 #ifdef CONFIG_RFS_ACCEL
9064         rmap = bp->dev->rx_cpu_rmap;
9065 #endif
9066         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9067                 flags = IRQF_SHARED;
9068
9069         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9070                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9071                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9072
9073 #ifdef CONFIG_RFS_ACCEL
9074                 if (rmap && bp->bnapi[i]->rx_ring) {
9075                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9076                         if (rc)
9077                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9078                                             j);
9079                         j++;
9080                 }
9081 #endif
9082                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9083                                  bp->bnapi[i]);
9084                 if (rc)
9085                         break;
9086
9087                 irq->requested = 1;
9088
9089                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9090                         int numa_node = dev_to_node(&bp->pdev->dev);
9091
9092                         irq->have_cpumask = 1;
9093                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9094                                         irq->cpu_mask);
9095                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9096                         if (rc) {
9097                                 netdev_warn(bp->dev,
9098                                             "Set affinity failed, IRQ = %d\n",
9099                                             irq->vector);
9100                                 break;
9101                         }
9102                 }
9103         }
9104         return rc;
9105 }
9106
9107 static void bnxt_del_napi(struct bnxt *bp)
9108 {
9109         int i;
9110
9111         if (!bp->bnapi)
9112                 return;
9113
9114         for (i = 0; i < bp->cp_nr_rings; i++) {
9115                 struct bnxt_napi *bnapi = bp->bnapi[i];
9116
9117                 __netif_napi_del(&bnapi->napi);
9118         }
9119         /* We called __netif_napi_del(), so we must respect an RCU grace
9120          * period before freeing the napi structures.
9121          */
9122         synchronize_net();
9123 }
9124
9125 static void bnxt_init_napi(struct bnxt *bp)
9126 {
9127         int i;
9128         unsigned int cp_nr_rings = bp->cp_nr_rings;
9129         struct bnxt_napi *bnapi;
9130
9131         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9132                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9133
9134                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9135                         poll_fn = bnxt_poll_p5;
9136                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9137                         cp_nr_rings--;
9138                 for (i = 0; i < cp_nr_rings; i++) {
9139                         bnapi = bp->bnapi[i];
9140                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9141                 }
9142                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9143                         bnapi = bp->bnapi[cp_nr_rings];
9144                         netif_napi_add(bp->dev, &bnapi->napi,
9145                                        bnxt_poll_nitroa0, 64);
9146                 }
9147         } else {
9148                 bnapi = bp->bnapi[0];
9149                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9150         }
9151 }
9152
9153 static void bnxt_disable_napi(struct bnxt *bp)
9154 {
9155         int i;
9156
9157         if (!bp->bnapi ||
9158             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9159                 return;
9160
9161         for (i = 0; i < bp->cp_nr_rings; i++) {
9162                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9163
9164                 napi_disable(&bp->bnapi[i]->napi);
9165                 if (bp->bnapi[i]->rx_ring)
9166                         cancel_work_sync(&cpr->dim.work);
9167         }
9168 }
9169
9170 static void bnxt_enable_napi(struct bnxt *bp)
9171 {
9172         int i;
9173
9174         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9175         for (i = 0; i < bp->cp_nr_rings; i++) {
9176                 struct bnxt_napi *bnapi = bp->bnapi[i];
9177                 struct bnxt_cp_ring_info *cpr;
9178
9179                 cpr = &bnapi->cp_ring;
9180                 if (bnapi->in_reset)
9181                         cpr->sw_stats.rx.rx_resets++;
9182                 bnapi->in_reset = false;
9183
9184                 if (bnapi->rx_ring) {
9185                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9186                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9187                 }
9188                 napi_enable(&bnapi->napi);
9189         }
9190 }
9191
9192 void bnxt_tx_disable(struct bnxt *bp)
9193 {
9194         int i;
9195         struct bnxt_tx_ring_info *txr;
9196
9197         if (bp->tx_ring) {
9198                 for (i = 0; i < bp->tx_nr_rings; i++) {
9199                         txr = &bp->tx_ring[i];
9200                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9201                 }
9202         }
9203         /* Make sure napi polls see @dev_state change */
9204         synchronize_net();
9205         /* Drop carrier first to prevent TX timeout */
9206         netif_carrier_off(bp->dev);
9207         /* Stop all TX queues */
9208         netif_tx_disable(bp->dev);
9209 }
9210
9211 void bnxt_tx_enable(struct bnxt *bp)
9212 {
9213         int i;
9214         struct bnxt_tx_ring_info *txr;
9215
9216         for (i = 0; i < bp->tx_nr_rings; i++) {
9217                 txr = &bp->tx_ring[i];
9218                 WRITE_ONCE(txr->dev_state, 0);
9219         }
9220         /* Make sure napi polls see @dev_state change */
9221         synchronize_net();
9222         netif_tx_wake_all_queues(bp->dev);
9223         if (bp->link_info.link_up)
9224                 netif_carrier_on(bp->dev);
9225 }
9226
9227 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9228 {
9229         u8 active_fec = link_info->active_fec_sig_mode &
9230                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9231
9232         switch (active_fec) {
9233         default:
9234         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9235                 return "None";
9236         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9237                 return "Clause 74 BaseR";
9238         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9239                 return "Clause 91 RS(528,514)";
9240         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9241                 return "Clause 91 RS544_1XN";
9242         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9243                 return "Clause 91 RS(544,514)";
9244         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9245                 return "Clause 91 RS272_1XN";
9246         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9247                 return "Clause 91 RS(272,257)";
9248         }
9249 }
9250
9251 static void bnxt_report_link(struct bnxt *bp)
9252 {
9253         if (bp->link_info.link_up) {
9254                 const char *signal = "";
9255                 const char *flow_ctrl;
9256                 const char *duplex;
9257                 u32 speed;
9258                 u16 fec;
9259
9260                 netif_carrier_on(bp->dev);
9261                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9262                 if (speed == SPEED_UNKNOWN) {
9263                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9264                         return;
9265                 }
9266                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9267                         duplex = "full";
9268                 else
9269                         duplex = "half";
9270                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9271                         flow_ctrl = "ON - receive & transmit";
9272                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9273                         flow_ctrl = "ON - transmit";
9274                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9275                         flow_ctrl = "ON - receive";
9276                 else
9277                         flow_ctrl = "none";
9278                 if (bp->link_info.phy_qcfg_resp.option_flags &
9279                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9280                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9281                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9282                         switch (sig_mode) {
9283                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9284                                 signal = "(NRZ) ";
9285                                 break;
9286                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9287                                 signal = "(PAM4) ";
9288                                 break;
9289                         default:
9290                                 break;
9291                         }
9292                 }
9293                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9294                             speed, signal, duplex, flow_ctrl);
9295                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9296                         netdev_info(bp->dev, "EEE is %s\n",
9297                                     bp->eee.eee_active ? "active" :
9298                                                          "not active");
9299                 fec = bp->link_info.fec_cfg;
9300                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9301                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9302                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9303                                     bnxt_report_fec(&bp->link_info));
9304         } else {
9305                 netif_carrier_off(bp->dev);
9306                 netdev_err(bp->dev, "NIC Link is Down\n");
9307         }
9308 }
9309
9310 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9311 {
9312         if (!resp->supported_speeds_auto_mode &&
9313             !resp->supported_speeds_force_mode &&
9314             !resp->supported_pam4_speeds_auto_mode &&
9315             !resp->supported_pam4_speeds_force_mode)
9316                 return true;
9317         return false;
9318 }
9319
9320 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9321 {
9322         int rc = 0;
9323         struct hwrm_port_phy_qcaps_input req = {0};
9324         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9325         struct bnxt_link_info *link_info = &bp->link_info;
9326
9327         if (bp->hwrm_spec_code < 0x10201)
9328                 return 0;
9329
9330         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9331
9332         mutex_lock(&bp->hwrm_cmd_lock);
9333         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9334         if (rc)
9335                 goto hwrm_phy_qcaps_exit;
9336
9337         bp->phy_flags = resp->flags;
9338         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9339                 struct ethtool_eee *eee = &bp->eee;
9340                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9341
9342                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9343                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9344                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9345                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9346                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9347         }
9348
9349         if (bp->hwrm_spec_code >= 0x10a01) {
9350                 if (bnxt_phy_qcaps_no_speed(resp)) {
9351                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9352                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9353                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9354                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9355                         netdev_info(bp->dev, "Ethernet link enabled\n");
9356                         /* Phy re-enabled, reprobe the speeds */
9357                         link_info->support_auto_speeds = 0;
9358                         link_info->support_pam4_auto_speeds = 0;
9359                 }
9360         }
9361         if (resp->supported_speeds_auto_mode)
9362                 link_info->support_auto_speeds =
9363                         le16_to_cpu(resp->supported_speeds_auto_mode);
9364         if (resp->supported_pam4_speeds_auto_mode)
9365                 link_info->support_pam4_auto_speeds =
9366                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9367
9368         bp->port_count = resp->port_cnt;
9369
9370 hwrm_phy_qcaps_exit:
9371         mutex_unlock(&bp->hwrm_cmd_lock);
9372         return rc;
9373 }
9374
9375 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9376 {
9377         u16 diff = advertising ^ supported;
9378
9379         return ((supported | diff) != supported);
9380 }
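
/* Example: advertising = 0x5 (bits 0 and 2) and supported = 0x1 (bit 0)
 * give diff = 0x4, and (supported | diff) = 0x5 != supported, so bit 2
 * was advertised but is no longer supported and the function returns true.
 */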
9381
9382 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9383 {
9384         int rc = 0;
9385         struct bnxt_link_info *link_info = &bp->link_info;
9386         struct hwrm_port_phy_qcfg_input req = {0};
9387         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9388         u8 link_up = link_info->link_up;
9389         bool support_changed = false;
9390
9391         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9392
9393         mutex_lock(&bp->hwrm_cmd_lock);
9394         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9395         if (rc) {
9396                 mutex_unlock(&bp->hwrm_cmd_lock);
9397                 return rc;
9398         }
9399
9400         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9401         link_info->phy_link_status = resp->link;
9402         link_info->duplex = resp->duplex_cfg;
9403         if (bp->hwrm_spec_code >= 0x10800)
9404                 link_info->duplex = resp->duplex_state;
9405         link_info->pause = resp->pause;
9406         link_info->auto_mode = resp->auto_mode;
9407         link_info->auto_pause_setting = resp->auto_pause;
9408         link_info->lp_pause = resp->link_partner_adv_pause;
9409         link_info->force_pause_setting = resp->force_pause;
9410         link_info->duplex_setting = resp->duplex_cfg;
9411         if (link_info->phy_link_status == BNXT_LINK_LINK)
9412                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9413         else
9414                 link_info->link_speed = 0;
9415         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9416         link_info->force_pam4_link_speed =
9417                 le16_to_cpu(resp->force_pam4_link_speed);
9418         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9419         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9420         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9421         link_info->auto_pam4_link_speeds =
9422                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9423         link_info->lp_auto_link_speeds =
9424                 le16_to_cpu(resp->link_partner_adv_speeds);
9425         link_info->lp_auto_pam4_link_speeds =
9426                 resp->link_partner_pam4_adv_speeds;
9427         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9428         link_info->phy_ver[0] = resp->phy_maj;
9429         link_info->phy_ver[1] = resp->phy_min;
9430         link_info->phy_ver[2] = resp->phy_bld;
9431         link_info->media_type = resp->media_type;
9432         link_info->phy_type = resp->phy_type;
9433         link_info->transceiver = resp->xcvr_pkg_type;
9434         link_info->phy_addr = resp->eee_config_phy_addr &
9435                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9436         link_info->module_status = resp->module_status;
9437
9438         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9439                 struct ethtool_eee *eee = &bp->eee;
9440                 u16 fw_speeds;
9441
9442                 eee->eee_active = 0;
9443                 if (resp->eee_config_phy_addr &
9444                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9445                         eee->eee_active = 1;
9446                         fw_speeds = le16_to_cpu(
9447                                 resp->link_partner_adv_eee_link_speed_mask);
9448                         eee->lp_advertised =
9449                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9450                 }
9451
9452                 /* Pull initial EEE config */
9453                 if (!chng_link_state) {
9454                         if (resp->eee_config_phy_addr &
9455                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9456                                 eee->eee_enabled = 1;
9457
9458                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9459                         eee->advertised =
9460                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9461
9462                         if (resp->eee_config_phy_addr &
9463                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9464                                 __le32 tmr;
9465
9466                                 eee->tx_lpi_enabled = 1;
9467                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9468                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9469                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9470                         }
9471                 }
9472         }
9473
9474         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9475         if (bp->hwrm_spec_code >= 0x10504) {
9476                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9477                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9478         }
9479         /* TODO: need to add more logic to report VF link */
9480         if (chng_link_state) {
9481                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9482                         link_info->link_up = 1;
9483                 else
9484                         link_info->link_up = 0;
9485                 if (link_up != link_info->link_up)
9486                         bnxt_report_link(bp);
9487         } else {
9488                 /* always link down if not required to update link state */
9489                 link_info->link_up = 0;
9490         }
9491         mutex_unlock(&bp->hwrm_cmd_lock);
9492
9493         if (!BNXT_PHY_CFG_ABLE(bp))
9494                 return 0;
9495
9496         /* Check if any advertised speeds are no longer supported. The caller
9497          * holds the link_lock mutex, so we can modify link_info settings.
9498          */
9499         if (bnxt_support_dropped(link_info->advertising,
9500                                  link_info->support_auto_speeds)) {
9501                 link_info->advertising = link_info->support_auto_speeds;
9502                 support_changed = true;
9503         }
9504         if (bnxt_support_dropped(link_info->advertising_pam4,
9505                                  link_info->support_pam4_auto_speeds)) {
9506                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9507                 support_changed = true;
9508         }
9509         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9510                 bnxt_hwrm_set_link_setting(bp, true, false);
9511         return 0;
9512 }
9513
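/* Refresh the link state and warn if an unqualified SFP+ module is
 * detected, including whether its TX is disabled or the module has been
 * shut down.
 */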
9514 static void bnxt_get_port_module_status(struct bnxt *bp)
9515 {
9516         struct bnxt_link_info *link_info = &bp->link_info;
9517         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9518         u8 module_status;
9519
9520         if (bnxt_update_link(bp, true))
9521                 return;
9522
9523         module_status = link_info->module_status;
9524         switch (module_status) {
9525         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9526         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9527         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9528                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9529                             bp->pf.port_id);
9530                 if (bp->hwrm_spec_code >= 0x10201) {
9531                         netdev_warn(bp->dev, "Module part number %s\n",
9532                                     resp->phy_vendor_partnumber);
9533                 }
9534                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9535                         netdev_warn(bp->dev, "TX is disabled\n");
9536                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9537                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9538         }
9539 }
9540
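/* Fill in the pause fields of a PORT_PHY_CFG request, using either the
 * autoneg or the forced flow control settings.
 */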
9541 static void
9542 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9543 {
9544         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9545                 if (bp->hwrm_spec_code >= 0x10201)
9546                         req->auto_pause =
9547                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9548                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9549                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9550                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9551                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9552                 req->enables |=
9553                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9554         } else {
9555                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9556                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9557                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9558                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9559                 req->enables |=
9560                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9561                 if (bp->hwrm_spec_code >= 0x10201) {
9562                         req->auto_pause = req->force_pause;
9563                         req->enables |= cpu_to_le32(
9564                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9565                 }
9566         }
9567 }
9568
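/* Fill in the link speed fields of a PORT_PHY_CFG request: the NRZ and
 * PAM4 advertisement masks when autoneg is enabled, or a forced link
 * speed otherwise.
 */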
9569 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9570 {
9571         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9572                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9573                 if (bp->link_info.advertising) {
9574                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9575                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9576                 }
9577                 if (bp->link_info.advertising_pam4) {
9578                         req->enables |=
9579                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9580                         req->auto_link_pam4_speed_mask =
9581                                 cpu_to_le16(bp->link_info.advertising_pam4);
9582                 }
9583                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9584                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9585         } else {
9586                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9587                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9588                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9589                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9590                 } else {
9591                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9592                 }
9593         }
9594
9595         /* tell chimp that the setting takes effect immediately */
9596         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9597 }
9598
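/* Apply the requested flow control settings via a PORT_PHY_CFG request. */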
9599 int bnxt_hwrm_set_pause(struct bnxt *bp)
9600 {
9601         struct hwrm_port_phy_cfg_input req = {0};
9602         int rc;
9603
9604         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9605         bnxt_hwrm_set_pause_common(bp, &req);
9606
9607         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9608             bp->link_info.force_link_chng)
9609                 bnxt_hwrm_set_link_common(bp, &req);
9610
9611         mutex_lock(&bp->hwrm_cmd_lock);
9612         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9613         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9614                 /* Since changing the pause setting doesn't trigger any link
9615                  * change event, the driver needs to update the current pause
9616                  * result upon successful return of the phy_cfg command.
9617                  */
9618                 bp->link_info.pause =
9619                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9620                 bp->link_info.auto_pause_setting = 0;
9621                 if (!bp->link_info.force_link_chng)
9622                         bnxt_report_link(bp);
9623         }
9624         bp->link_info.force_link_chng = false;
9625         mutex_unlock(&bp->hwrm_cmd_lock);
9626         return rc;
9627 }
9628
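/* Fill in the EEE fields of a PORT_PHY_CFG request from the current
 * ethtool_eee settings.
 */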
9629 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9630                               struct hwrm_port_phy_cfg_input *req)
9631 {
9632         struct ethtool_eee *eee = &bp->eee;
9633
9634         if (eee->eee_enabled) {
9635                 u16 eee_speeds;
9636                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9637
9638                 if (eee->tx_lpi_enabled)
9639                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9640                 else
9641                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9642
9643                 req->flags |= cpu_to_le32(flags);
9644                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9645                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9646                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9647         } else {
9648                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9649         }
9650 }
9651
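/* Apply link speed and, optionally, pause and EEE settings in a single
 * PORT_PHY_CFG request.
 */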
9652 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9653 {
9654         struct hwrm_port_phy_cfg_input req = {0};
9655
9656         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9657         if (set_pause)
9658                 bnxt_hwrm_set_pause_common(bp, &req);
9659
9660         bnxt_hwrm_set_link_common(bp, &req);
9661
9662         if (set_eee)
9663                 bnxt_hwrm_set_eee(bp, &req);
9664         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9665 }
9666
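/* Force the link down when the device is closed.  Only done by a single
 * PF; skipped when VFs are active unless the firmware manages the forced
 * link-down itself.
 */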
9667 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9668 {
9669         struct hwrm_port_phy_cfg_input req = {0};
9670
9671         if (!BNXT_SINGLE_PF(bp))
9672                 return 0;
9673
9674         if (pci_num_vf(bp->pdev) &&
9675             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9676                 return 0;
9677
9678         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9679         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9680         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9681 }
9682
9683 static int bnxt_fw_init_one(struct bnxt *bp);
9684
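/* Request a firmware reset through OP-TEE; only available when the
 * kernel is built with CONFIG_TEE_BNXT_FW.
 */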
9685 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9686 {
9687 #ifdef CONFIG_TEE_BNXT_FW
9688         int rc = tee_bnxt_fw_load();
9689
9690         if (rc)
9691                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9692
9693         return rc;
9694 #else
9695         netdev_err(bp->dev, "OP-TEE not supported\n");
9696         return -ENODEV;
9697 #endif
9698 }
9699
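/* Poll the firmware health register while firmware is booting or
 * recovering, and fall back to an OP-TEE reset if it crashed with no
 * master function available to recover it.
 */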
9700 static int bnxt_try_recover_fw(struct bnxt *bp)
9701 {
9702         if (bp->fw_health && bp->fw_health->status_reliable) {
9703                 int retry = 0, rc;
9704                 u32 sts;
9705
9706                 mutex_lock(&bp->hwrm_cmd_lock);
9707                 do {
9708                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9709                         rc = __bnxt_hwrm_ver_get(bp, true);
9710                         if (!BNXT_FW_IS_BOOTING(sts) &&
9711                             !BNXT_FW_IS_RECOVERING(sts))
9712                                 break;
9713                         retry++;
9714                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9715                 mutex_unlock(&bp->hwrm_cmd_lock);
9716
9717                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9718                         netdev_err(bp->dev,
9719                                    "Firmware not responding, status: 0x%x\n",
9720                                    sts);
9721                         rc = -ENODEV;
9722                 }
9723                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9724                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9725                         return bnxt_fw_reset_via_optee(bp);
9726                 }
9727                 return rc;
9728         }
9729
9730         return -ENODEV;
9731 }
9732
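/* Tell firmware that the driver interface is going up or down.  On the
 * way up, detect whether firmware has reset or resources have changed
 * while the interface was down, and reinitialize accordingly.
 */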
9733 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9734 {
9735         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9736         struct hwrm_func_drv_if_change_input req = {0};
9737         bool fw_reset = !bp->irq_tbl;
9738         bool resc_reinit = false;
9739         int rc, retry = 0;
9740         u32 flags = 0;
9741
9742         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9743                 return 0;
9744
9745         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9746         if (up)
9747                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9748         mutex_lock(&bp->hwrm_cmd_lock);
9749         while (retry < BNXT_FW_IF_RETRY) {
9750                 rc = _hwrm_send_message(bp, &req, sizeof(req),
9751                                         HWRM_CMD_TIMEOUT);
9752                 if (rc != -EAGAIN)
9753                         break;
9754
9755                 msleep(50);
9756                 retry++;
9757         }
9758         if (!rc)
9759                 flags = le32_to_cpu(resp->flags);
9760         mutex_unlock(&bp->hwrm_cmd_lock);
9761
9762         if (rc == -EAGAIN)
9763                 return rc;
9764         if (rc && up) {
9765                 rc = bnxt_try_recover_fw(bp);
9766                 fw_reset = true;
9767         }
9768         if (rc)
9769                 return rc;
9770
9771         if (!up) {
9772                 bnxt_inv_fw_health_reg(bp);
9773                 return 0;
9774         }
9775
9776         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9777                 resc_reinit = true;
9778         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9779                 fw_reset = true;
9780         else if (bp->fw_health && !bp->fw_health->status_reliable)
9781                 bnxt_try_map_fw_health_reg(bp);
9782
9783         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9784                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9785                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9786                 return -ENODEV;
9787         }
9788         if (resc_reinit || fw_reset) {
9789                 if (fw_reset) {
9790                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9791                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9792                                 bnxt_ulp_stop(bp);
9793                         bnxt_free_ctx_mem(bp);
9794                         kfree(bp->ctx);
9795                         bp->ctx = NULL;
9796                         bnxt_dcb_free(bp);
9797                         rc = bnxt_fw_init_one(bp);
9798                         if (rc) {
9799                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9800                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9801                                 return rc;
9802                         }
9803                         bnxt_clear_int_mode(bp);
9804                         rc = bnxt_init_int_mode(bp);
9805                         if (rc) {
9806                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9807                                 netdev_err(bp->dev, "init int mode failed\n");
9808                                 return rc;
9809                         }
9810                 }
9811                 if (BNXT_NEW_RM(bp)) {
9812                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9813
9814                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9815                         if (rc)
9816                                 netdev_err(bp->dev, "resc_qcaps failed\n");
9817
9818                         hw_resc->resv_cp_rings = 0;
9819                         hw_resc->resv_stat_ctxs = 0;
9820                         hw_resc->resv_irqs = 0;
9821                         hw_resc->resv_tx_rings = 0;
9822                         hw_resc->resv_rx_rings = 0;
9823                         hw_resc->resv_hw_ring_grps = 0;
9824                         hw_resc->resv_vnics = 0;
9825                         if (!fw_reset) {
9826                                 bp->tx_nr_rings = 0;
9827                                 bp->rx_nr_rings = 0;
9828                         }
9829                 }
9830         }
9831         return rc;
9832 }
9833
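/* Query the port LED capabilities.  LEDs are only used if every LED
 * reports a group ID and supports alternating blink.
 */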
9834 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9835 {
9836         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9837         struct hwrm_port_led_qcaps_input req = {0};
9838         struct bnxt_pf_info *pf = &bp->pf;
9839         int rc;
9840
9841         bp->num_leds = 0;
9842         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9843                 return 0;
9844
9845         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9846         req.port_id = cpu_to_le16(pf->port_id);
9847         mutex_lock(&bp->hwrm_cmd_lock);
9848         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9849         if (rc) {
9850                 mutex_unlock(&bp->hwrm_cmd_lock);
9851                 return rc;
9852         }
9853         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9854                 int i;
9855
9856                 bp->num_leds = resp->num_leds;
9857                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9858                                                  bp->num_leds);
9859                 for (i = 0; i < bp->num_leds; i++) {
9860                         struct bnxt_led_info *led = &bp->leds[i];
9861                         __le16 caps = led->led_state_caps;
9862
9863                         if (!led->led_group_id ||
9864                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9865                                 bp->num_leds = 0;
9866                                 break;
9867                         }
9868                 }
9869         }
9870         mutex_unlock(&bp->hwrm_cmd_lock);
9871         return 0;
9872 }
9873
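/* Allocate a magic-packet Wake-on-LAN filter for the device's current
 * MAC address.
 */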
9874 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9875 {
9876         struct hwrm_wol_filter_alloc_input req = {0};
9877         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9878         int rc;
9879
9880         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9881         req.port_id = cpu_to_le16(bp->pf.port_id);
9882         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9883         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9884         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9885         mutex_lock(&bp->hwrm_cmd_lock);
9886         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9887         if (!rc)
9888                 bp->wol_filter_id = resp->wol_filter_id;
9889         mutex_unlock(&bp->hwrm_cmd_lock);
9890         return rc;
9891 }
9892
9893 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9894 {
9895         struct hwrm_wol_filter_free_input req = {0};
9896
9897         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9898         req.port_id = cpu_to_le16(bp->pf.port_id);
9899         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9900         req.wol_filter_id = bp->wol_filter_id;
9901         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9902 }
9903
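/* Query one WoL filter entry, record it if it is a magic-packet filter,
 * and return the handle of the next entry (0 when there are no more).
 */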
9904 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9905 {
9906         struct hwrm_wol_filter_qcfg_input req = {0};
9907         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9908         u16 next_handle = 0;
9909         int rc;
9910
9911         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9912         req.port_id = cpu_to_le16(bp->pf.port_id);
9913         req.handle = cpu_to_le16(handle);
9914         mutex_lock(&bp->hwrm_cmd_lock);
9915         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9916         if (!rc) {
9917                 next_handle = le16_to_cpu(resp->next_handle);
9918                 if (next_handle != 0) {
9919                         if (resp->wol_type ==
9920                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9921                                 bp->wol = 1;
9922                                 bp->wol_filter_id = resp->wol_filter_id;
9923                         }
9924                 }
9925         }
9926         mutex_unlock(&bp->hwrm_cmd_lock);
9927         return next_handle;
9928 }
9929
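/* Walk the firmware WoL filter table to discover whether a magic-packet
 * filter is already configured for this port.
 */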
9930 static void bnxt_get_wol_settings(struct bnxt *bp)
9931 {
9932         u16 handle = 0;
9933
9934         bp->wol = 0;
9935         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9936                 return;
9937
9938         do {
9939                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9940         } while (handle && handle != 0xffff);
9941 }
9942
9943 #ifdef CONFIG_BNXT_HWMON
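/* hwmon temp1_input handler: read the chip temperature from firmware and
 * report it in millidegrees Celsius.
 */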
9944 static ssize_t bnxt_show_temp(struct device *dev,
9945                               struct device_attribute *devattr, char *buf)
9946 {
9947         struct hwrm_temp_monitor_query_input req = {0};
9948         struct hwrm_temp_monitor_query_output *resp;
9949         struct bnxt *bp = dev_get_drvdata(dev);
9950         u32 len = 0;
9951         int rc;
9952
9953         resp = bp->hwrm_cmd_resp_addr;
9954         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9955         mutex_lock(&bp->hwrm_cmd_lock);
9956         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9957         if (!rc)
9958                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegrees */
9959         mutex_unlock(&bp->hwrm_cmd_lock);
9960         if (rc)
9961                 return rc;
9962         return len;
9963 }
9964 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9965
9966 static struct attribute *bnxt_attrs[] = {
9967         &sensor_dev_attr_temp1_input.dev_attr.attr,
9968         NULL
9969 };
9970 ATTRIBUTE_GROUPS(bnxt);
9971
9972 static void bnxt_hwmon_close(struct bnxt *bp)
9973 {
9974         if (bp->hwmon_dev) {
9975                 hwmon_device_unregister(bp->hwmon_dev);
9976                 bp->hwmon_dev = NULL;
9977         }
9978 }
9979
9980 static void bnxt_hwmon_open(struct bnxt *bp)
9981 {
9982         struct hwrm_temp_monitor_query_input req = {0};
9983         struct pci_dev *pdev = bp->pdev;
9984         int rc;
9985
9986         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9987         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9988         if (rc == -EACCES || rc == -EOPNOTSUPP) {
9989                 bnxt_hwmon_close(bp);
9990                 return;
9991         }
9992
9993         if (bp->hwmon_dev)
9994                 return;
9995
9996         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9997                                                           DRV_MODULE_NAME, bp,
9998                                                           bnxt_groups);
9999         if (IS_ERR(bp->hwmon_dev)) {
10000                 bp->hwmon_dev = NULL;
10001                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10002         }
10003 }
10004 #else
10005 static void bnxt_hwmon_close(struct bnxt *bp)
10006 {
10007 }
10008
10009 static void bnxt_hwmon_open(struct bnxt *bp)
10010 {
10011 }
10012 #endif
10013
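/* Check that the EEE settings are consistent with the current autoneg
 * configuration, fixing them up and returning false if they are not.
 */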
10014 static bool bnxt_eee_config_ok(struct bnxt *bp)
10015 {
10016         struct ethtool_eee *eee = &bp->eee;
10017         struct bnxt_link_info *link_info = &bp->link_info;
10018
10019         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10020                 return true;
10021
10022         if (eee->eee_enabled) {
10023                 u32 advertising =
10024                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10025
10026                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10027                         eee->eee_enabled = 0;
10028                         return false;
10029                 }
10030                 if (eee->advertised & ~advertising) {
10031                         eee->advertised = advertising & eee->supported;
10032                         return false;
10033                 }
10034         }
10035         return true;
10036 }
10037
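/* Compare the requested link, pause, and EEE settings against the
 * current PHY configuration and send PORT_PHY_CFG only if something
 * needs to change.
 */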
10038 static int bnxt_update_phy_setting(struct bnxt *bp)
10039 {
10040         int rc;
10041         bool update_link = false;
10042         bool update_pause = false;
10043         bool update_eee = false;
10044         struct bnxt_link_info *link_info = &bp->link_info;
10045
10046         rc = bnxt_update_link(bp, true);
10047         if (rc) {
10048                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10049                            rc);
10050                 return rc;
10051         }
10052         if (!BNXT_SINGLE_PF(bp))
10053                 return 0;
10054
10055         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10056             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10057             link_info->req_flow_ctrl)
10058                 update_pause = true;
10059         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10060             link_info->force_pause_setting != link_info->req_flow_ctrl)
10061                 update_pause = true;
10062         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10063                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10064                         update_link = true;
10065                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10066                     link_info->req_link_speed != link_info->force_link_speed)
10067                         update_link = true;
10068                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10069                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10070                         update_link = true;
10071                 if (link_info->req_duplex != link_info->duplex_setting)
10072                         update_link = true;
10073         } else {
10074                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10075                         update_link = true;
10076                 if (link_info->advertising != link_info->auto_link_speeds ||
10077                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10078                         update_link = true;
10079         }
10080
10081         /* The last close may have shut down the link, so we need to call
10082          * PHY_CFG to bring it back up.
10083          */
10084         if (!bp->link_info.link_up)
10085                 update_link = true;
10086
10087         if (!bnxt_eee_config_ok(bp))
10088                 update_eee = true;
10089
10090         if (update_link)
10091                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10092         else if (update_pause)
10093                 rc = bnxt_hwrm_set_pause(bp);
10094         if (rc) {
10095                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10096                            rc);
10097                 return rc;
10098         }
10099
10100         return rc;
10101 }
10102
10103 /* Common routine to pre-map certain register blocks to different GRC windows.
10104  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
10105  * in the PF and 3 windows in the VF can be customized to map different
10106  * register blocks.
10107  */
10108 static void bnxt_preset_reg_win(struct bnxt *bp)
10109 {
10110         if (BNXT_PF(bp)) {
10111                 /* CAG registers map to GRC window #4 */
10112                 writel(BNXT_CAG_REG_BASE,
10113                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10114         }
10115 }
10116
10117 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10118
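/* Retry full firmware initialization after a previous fatal error, and
 * clear the error state if it succeeds.
 */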
10119 static int bnxt_reinit_after_abort(struct bnxt *bp)
10120 {
10121         int rc;
10122
10123         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10124                 return -EBUSY;
10125
10126         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10127                 return -ENODEV;
10128
10129         rc = bnxt_fw_init_one(bp);
10130         if (!rc) {
10131                 bnxt_clear_int_mode(bp);
10132                 rc = bnxt_init_int_mode(bp);
10133                 if (!rc) {
10134                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10135                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10136                 }
10137         }
10138         return rc;
10139 }
10140
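/* Core open path: reserve rings, allocate memory and IRQs, initialize
 * the NIC, then enable NAPI, interrupts, and the TX queues.
 */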
10141 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10142 {
10143         int rc = 0;
10144
10145         bnxt_preset_reg_win(bp);
10146         netif_carrier_off(bp->dev);
10147         if (irq_re_init) {
10148                 /* Reserve rings now if none were reserved at driver probe. */
10149                 rc = bnxt_init_dflt_ring_mode(bp);
10150                 if (rc) {
10151                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10152                         return rc;
10153                 }
10154         }
10155         rc = bnxt_reserve_rings(bp, irq_re_init);
10156         if (rc)
10157                 return rc;
10158         if ((bp->flags & BNXT_FLAG_RFS) &&
10159             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10160                 /* disable RFS if falling back to INTA */
10161                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10162                 bp->flags &= ~BNXT_FLAG_RFS;
10163         }
10164
10165         rc = bnxt_alloc_mem(bp, irq_re_init);
10166         if (rc) {
10167                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10168                 goto open_err_free_mem;
10169         }
10170
10171         if (irq_re_init) {
10172                 bnxt_init_napi(bp);
10173                 rc = bnxt_request_irq(bp);
10174                 if (rc) {
10175                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10176                         goto open_err_irq;
10177                 }
10178         }
10179
10180         rc = bnxt_init_nic(bp, irq_re_init);
10181         if (rc) {
10182                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10183                 goto open_err_irq;
10184         }
10185
10186         bnxt_enable_napi(bp);
10187         bnxt_debug_dev_init(bp);
10188
10189         if (link_re_init) {
10190                 mutex_lock(&bp->link_lock);
10191                 rc = bnxt_update_phy_setting(bp);
10192                 mutex_unlock(&bp->link_lock);
10193                 if (rc) {
10194                         netdev_warn(bp->dev, "failed to update phy settings\n");
10195                         if (BNXT_SINGLE_PF(bp)) {
10196                                 bp->link_info.phy_retry = true;
10197                                 bp->link_info.phy_retry_expires =
10198                                         jiffies + 5 * HZ;
10199                         }
10200                 }
10201         }
10202
10203         if (irq_re_init)
10204                 udp_tunnel_nic_reset_ntf(bp->dev);
10205
10206         set_bit(BNXT_STATE_OPEN, &bp->state);
10207         bnxt_enable_int(bp);
10208         /* Enable TX queues */
10209         bnxt_tx_enable(bp);
10210         mod_timer(&bp->timer, jiffies + bp->current_interval);
10211         /* Poll link status and check for SFP+ module status */
10212         bnxt_get_port_module_status(bp);
10213
10214         /* VF-reps may need to be re-opened after the PF is re-opened */
10215         if (BNXT_PF(bp))
10216                 bnxt_vf_reps_open(bp);
10217         return 0;
10218
10219 open_err_irq:
10220         bnxt_del_napi(bp);
10221
10222 open_err_free_mem:
10223         bnxt_free_skbs(bp);
10224         bnxt_free_irq(bp);
10225         bnxt_free_mem(bp, true);
10226         return rc;
10227 }
10228
10229 /* rtnl_lock held */
10230 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10231 {
10232         int rc = 0;
10233
10234         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10235                 rc = -EIO;
10236         if (!rc)
10237                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10238         if (rc) {
10239                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10240                 dev_close(bp->dev);
10241         }
10242         return rc;
10243 }
10244
10245 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10246  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10247  * self-tests.
10248  */
10249 int bnxt_half_open_nic(struct bnxt *bp)
10250 {
10251         int rc = 0;
10252
10253         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10254                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10255                 rc = -ENODEV;
10256                 goto half_open_err;
10257         }
10258
10259         rc = bnxt_alloc_mem(bp, false);
10260         if (rc) {
10261                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10262                 goto half_open_err;
10263         }
10264         rc = bnxt_init_nic(bp, false);
10265         if (rc) {
10266                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10267                 goto half_open_err;
10268         }
10269         return 0;
10270
10271 half_open_err:
10272         bnxt_free_skbs(bp);
10273         bnxt_free_mem(bp, false);
10274         dev_close(bp->dev);
10275         return rc;
10276 }
10277
10278 /* rtnl_lock held, this call can only be made after a previous successful
10279  * call to bnxt_half_open_nic().
10280  */
10281 void bnxt_half_close_nic(struct bnxt *bp)
10282 {
10283         bnxt_hwrm_resource_free(bp, false, false);
10284         bnxt_free_skbs(bp);
10285         bnxt_free_mem(bp, false);
10286 }
10287
10288 static void bnxt_reenable_sriov(struct bnxt *bp)
10289 {
10290         if (BNXT_PF(bp)) {
10291                 struct bnxt_pf_info *pf = &bp->pf;
10292                 int n = pf->active_vfs;
10293
10294                 if (n)
10295                         bnxt_cfg_hw_sriov(bp, &n, true);
10296         }
10297 }
10298
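/* ndo_open handler: recover from any aborted firmware reset, notify
 * firmware, set up PTP, and open the NIC.
 */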
10299 static int bnxt_open(struct net_device *dev)
10300 {
10301         struct bnxt *bp = netdev_priv(dev);
10302         int rc;
10303
10304         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10305                 rc = bnxt_reinit_after_abort(bp);
10306                 if (rc) {
10307                         if (rc == -EBUSY)
10308                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10309                         else
10310                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10311                         return -ENODEV;
10312                 }
10313         }
10314
10315         rc = bnxt_hwrm_if_change(bp, true);
10316         if (rc)
10317                 return rc;
10318
10319         if (bnxt_ptp_init(bp)) {
10320                 netdev_warn(dev, "PTP initialization failed.\n");
10321                 kfree(bp->ptp_cfg);
10322                 bp->ptp_cfg = NULL;
10323         }
10324         rc = __bnxt_open_nic(bp, true, true);
10325         if (rc) {
10326                 bnxt_hwrm_if_change(bp, false);
10327                 bnxt_ptp_clear(bp);
10328         } else {
10329                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10330                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10331                                 bnxt_ulp_start(bp, 0);
10332                                 bnxt_reenable_sriov(bp);
10333                         }
10334                 }
10335                 bnxt_hwmon_open(bp);
10336         }
10337
10338         return rc;
10339 }
10340
10341 static bool bnxt_drv_busy(struct bnxt *bp)
10342 {
10343         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10344                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10345 }
10346
10347 static void bnxt_get_ring_stats(struct bnxt *bp,
10348                                 struct rtnl_link_stats64 *stats);
10349
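/* Core close path: quiesce TX and NAPI, wait for in-progress readers,
 * shut down the rings, and free IRQs and memory as requested.
 */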
10350 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10351                              bool link_re_init)
10352 {
10353         /* Close the VF-reps before closing PF */
10354         if (BNXT_PF(bp))
10355                 bnxt_vf_reps_close(bp);
10356
10357         /* Change device state to avoid TX queue wake-ups */
10358         bnxt_tx_disable(bp);
10359
10360         clear_bit(BNXT_STATE_OPEN, &bp->state);
10361         smp_mb__after_atomic();
10362         while (bnxt_drv_busy(bp))
10363                 msleep(20);
10364
10365         /* Flush rings and disable interrupts */
10366         bnxt_shutdown_nic(bp, irq_re_init);
10367
10368         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10369
10370         bnxt_debug_dev_exit(bp);
10371         bnxt_disable_napi(bp);
10372         del_timer_sync(&bp->timer);
10373         bnxt_free_skbs(bp);
10374
10375         /* Save ring stats before shutdown */
10376         if (bp->bnapi && irq_re_init)
10377                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10378         if (irq_re_init) {
10379                 bnxt_free_irq(bp);
10380                 bnxt_del_napi(bp);
10381         }
10382         bnxt_free_mem(bp, irq_re_init);
10383 }
10384
10385 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10386 {
10387         int rc = 0;
10388
10389         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10390                 /* If we get here, it means firmware reset is in progress
10391                  * while we are trying to close.  We can safely proceed with
10392                  * the close because we are holding rtnl_lock().  Some firmware
10393                  * messages may fail as we proceed to close.  We set the
10394                  * ABORT_ERR flag here so that the FW reset thread will later
10395                  * abort when it gets the rtnl_lock() and sees the flag.
10396                  */
10397                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10398                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10399         }
10400
10401 #ifdef CONFIG_BNXT_SRIOV
10402         if (bp->sriov_cfg) {
10403                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10404                                                       !bp->sriov_cfg,
10405                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10406                 if (!rc)
10407                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10408         }
10409 #endif
10410         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10411         return rc;
10412 }
10413
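/* ndo_stop handler: tear down PTP and hwmon, close the NIC, shut the
 * link down, and notify firmware.
 */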
10414 static int bnxt_close(struct net_device *dev)
10415 {
10416         struct bnxt *bp = netdev_priv(dev);
10417
10418         bnxt_ptp_clear(bp);
10419         bnxt_hwmon_close(bp);
10420         bnxt_close_nic(bp, true, true);
10421         bnxt_hwrm_shutdown_link(bp);
10422         bnxt_hwrm_if_change(bp, false);
10423         return 0;
10424 }
10425
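/* Read a PHY register over MDIO via firmware, with clause 45 support. */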
10426 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10427                                    u16 *val)
10428 {
10429         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10430         struct hwrm_port_phy_mdio_read_input req = {0};
10431         int rc;
10432
10433         if (bp->hwrm_spec_code < 0x10a00)
10434                 return -EOPNOTSUPP;
10435
10436         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10437         req.port_id = cpu_to_le16(bp->pf.port_id);
10438         req.phy_addr = phy_addr;
10439         req.reg_addr = cpu_to_le16(reg & 0x1f);
10440         if (mdio_phy_id_is_c45(phy_addr)) {
10441                 req.cl45_mdio = 1;
10442                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10443                 req.dev_addr = mdio_phy_id_devad(phy_addr);
10444                 req.reg_addr = cpu_to_le16(reg);
10445         }
10446
10447         mutex_lock(&bp->hwrm_cmd_lock);
10448         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10449         if (!rc)
10450                 *val = le16_to_cpu(resp->reg_data);
10451         mutex_unlock(&bp->hwrm_cmd_lock);
10452         return rc;
10453 }
10454
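/* Write a PHY register over MDIO via firmware, with clause 45 support. */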
10455 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10456                                     u16 val)
10457 {
10458         struct hwrm_port_phy_mdio_write_input req = {0};
10459
10460         if (bp->hwrm_spec_code < 0x10a00)
10461                 return -EOPNOTSUPP;
10462
10463         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10464         req.port_id = cpu_to_le16(bp->pf.port_id);
10465         req.phy_addr = phy_addr;
10466         req.reg_addr = cpu_to_le16(reg & 0x1f);
10467         if (mdio_phy_id_is_c45(phy_addr)) {
10468                 req.cl45_mdio = 1;
10469                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10470                 req.dev_addr = mdio_phy_id_devad(phy_addr);
10471                 req.reg_addr = cpu_to_le16(reg);
10472         }
10473         req.reg_data = cpu_to_le16(val);
10474
10475         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10476 }
10477
10478 /* rtnl_lock held */
10479 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10480 {
10481         struct mii_ioctl_data *mdio = if_mii(ifr);
10482         struct bnxt *bp = netdev_priv(dev);
10483         int rc;
10484
10485         switch (cmd) {
10486         case SIOCGMIIPHY:
10487                 mdio->phy_id = bp->link_info.phy_addr;
10488
10489                 fallthrough;
10490         case SIOCGMIIREG: {
10491                 u16 mii_regval = 0;
10492
10493                 if (!netif_running(dev))
10494                         return -EAGAIN;
10495
10496                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10497                                              &mii_regval);
10498                 mdio->val_out = mii_regval;
10499                 return rc;
10500         }
10501
10502         case SIOCSMIIREG:
10503                 if (!netif_running(dev))
10504                         return -EAGAIN;
10505
10506                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10507                                                 mdio->val_in);
10508
10509         case SIOCSHWTSTAMP:
10510                 return bnxt_hwtstamp_set(dev, ifr);
10511
10512         case SIOCGHWTSTAMP:
10513                 return bnxt_hwtstamp_get(dev, ifr);
10514
10515         default:
10516                 /* do nothing */
10517                 break;
10518         }
10519         return -EOPNOTSUPP;
10520 }
10521
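/* Aggregate the per-ring software counters into rtnl_link_stats64. */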
10522 static void bnxt_get_ring_stats(struct bnxt *bp,
10523                                 struct rtnl_link_stats64 *stats)
10524 {
10525         int i;
10526
10527         for (i = 0; i < bp->cp_nr_rings; i++) {
10528                 struct bnxt_napi *bnapi = bp->bnapi[i];
10529                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10530                 u64 *sw = cpr->stats.sw_stats;
10531
10532                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10533                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10534                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10535
10536                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10537                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10538                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10539
10540                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10541                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10542                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10543
10544                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10545                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10546                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10547
10548                 stats->rx_missed_errors +=
10549                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10550
10551                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10552
10553                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10554         }
10555 }
10556
10557 static void bnxt_add_prev_stats(struct bnxt *bp,
10558                                 struct rtnl_link_stats64 *stats)
10559 {
10560         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10561
10562         stats->rx_packets += prev_stats->rx_packets;
10563         stats->tx_packets += prev_stats->tx_packets;
10564         stats->rx_bytes += prev_stats->rx_bytes;
10565         stats->tx_bytes += prev_stats->tx_bytes;
10566         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10567         stats->multicast += prev_stats->multicast;
10568         stats->tx_dropped += prev_stats->tx_dropped;
10569 }
10570
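/* ndo_get_stats64 handler: combine live ring counters, counters saved
 * before the last reset, and port-level error statistics.
 */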
10571 static void
10572 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10573 {
10574         struct bnxt *bp = netdev_priv(dev);
10575
10576         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10577         /* Make sure bnxt_close_nic() sees that we are reading stats before
10578          * we check the BNXT_STATE_OPEN flag.
10579          */
10580         smp_mb__after_atomic();
10581         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10582                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10583                 *stats = bp->net_stats_prev;
10584                 return;
10585         }
10586
10587         bnxt_get_ring_stats(bp, stats);
10588         bnxt_add_prev_stats(bp, stats);
10589
10590         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10591                 u64 *rx = bp->port_stats.sw_stats;
10592                 u64 *tx = bp->port_stats.sw_stats +
10593                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10594
10595                 stats->rx_crc_errors =
10596                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10597                 stats->rx_frame_errors =
10598                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10599                 stats->rx_length_errors =
10600                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10601                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10602                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10603                 stats->rx_errors =
10604                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10605                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10606                 stats->collisions =
10607                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10608                 stats->tx_fifo_errors =
10609                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10610                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10611         }
10612         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10613 }
10614
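/* Sync the device multicast list into the vnic, reporting whether it
 * changed; falls back to ALL_MCAST if the list is too long.
 */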
10615 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10616 {
10617         struct net_device *dev = bp->dev;
10618         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10619         struct netdev_hw_addr *ha;
10620         u8 *haddr;
10621         int mc_count = 0;
10622         bool update = false;
10623         int off = 0;
10624
10625         netdev_for_each_mc_addr(ha, dev) {
10626                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10627                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10628                         vnic->mc_list_count = 0;
10629                         return false;
10630                 }
10631                 haddr = ha->addr;
10632                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10633                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10634                         update = true;
10635                 }
10636                 off += ETH_ALEN;
10637                 mc_count++;
10638         }
10639         if (mc_count)
10640                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10641
10642         if (mc_count != vnic->mc_list_count) {
10643                 vnic->mc_list_count = mc_count;
10644                 update = true;
10645         }
10646         return update;
10647 }
10648
10649 static bool bnxt_uc_list_updated(struct bnxt *bp)
10650 {
10651         struct net_device *dev = bp->dev;
10652         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10653         struct netdev_hw_addr *ha;
10654         int off = 0;
10655
10656         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10657                 return true;
10658
10659         netdev_for_each_uc_addr(ha, dev) {
10660                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10661                         return true;
10662
10663                 off += ETH_ALEN;
10664         }
10665         return false;
10666 }
10667
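/* ndo_set_rx_mode handler: recompute the RX mask from the device flags
 * and address lists, and schedule a firmware update if anything changed.
 */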
10668 static void bnxt_set_rx_mode(struct net_device *dev)
10669 {
10670         struct bnxt *bp = netdev_priv(dev);
10671         struct bnxt_vnic_info *vnic;
10672         bool mc_update = false;
10673         bool uc_update;
10674         u32 mask;
10675
10676         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10677                 return;
10678
10679         vnic = &bp->vnic_info[0];
10680         mask = vnic->rx_mask;
10681         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10682                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10683                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10684                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10685
10686         if (dev->flags & IFF_PROMISC)
10687                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10688
10689         uc_update = bnxt_uc_list_updated(bp);
10690
10691         if (dev->flags & IFF_BROADCAST)
10692                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10693         if (dev->flags & IFF_ALLMULTI) {
10694                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10695                 vnic->mc_list_count = 0;
10696         } else {
10697                 mc_update = bnxt_mc_list_updated(bp, &mask);
10698         }
10699
10700         if (mask != vnic->rx_mask || uc_update || mc_update) {
10701                 vnic->rx_mask = mask;
10702
10703                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10704                 bnxt_queue_sp_work(bp);
10705         }
10706 }
10707
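/* Reprogram the unicast filters and RX mask in firmware, falling back
 * to promiscuous or all-multicast mode when resources are exhausted.
 */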
10708 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10709 {
10710         struct net_device *dev = bp->dev;
10711         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10712         struct netdev_hw_addr *ha;
10713         int i, off = 0, rc;
10714         bool uc_update;
10715
10716         netif_addr_lock_bh(dev);
10717         uc_update = bnxt_uc_list_updated(bp);
10718         netif_addr_unlock_bh(dev);
10719
10720         if (!uc_update)
10721                 goto skip_uc;
10722
10723         mutex_lock(&bp->hwrm_cmd_lock);
10724         for (i = 1; i < vnic->uc_filter_count; i++) {
10725                 struct hwrm_cfa_l2_filter_free_input req = {0};
10726
10727                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10728                                        -1);
10729
10730                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10731
10732                 rc = _hwrm_send_message(bp, &req, sizeof(req),
10733                                         HWRM_CMD_TIMEOUT);
10734         }
10735         mutex_unlock(&bp->hwrm_cmd_lock);
10736
10737         vnic->uc_filter_count = 1;
10738
10739         netif_addr_lock_bh(dev);
10740         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10741                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10742         } else {
10743                 netdev_for_each_uc_addr(ha, dev) {
10744                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10745                         off += ETH_ALEN;
10746                         vnic->uc_filter_count++;
10747                 }
10748         }
10749         netif_addr_unlock_bh(dev);
10750
10751         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10752                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10753                 if (rc) {
10754                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10755                                    rc);
10756                         vnic->uc_filter_count = i;
10757                         return rc;
10758                 }
10759         }
10760
10761 skip_uc:
10762         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10763             !bnxt_promisc_ok(bp))
10764                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10765         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10766         if (rc && vnic->mc_list_count) {
10767                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10768                             rc);
10769                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10770                 vnic->mc_list_count = 0;
10771                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10772         }
10773         if (rc)
10774                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10775                            rc);
10776
10777         return rc;
10778 }
10779
10780 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10781 {
10782 #ifdef CONFIG_BNXT_SRIOV
10783         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10784                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10785
10786                 /* If no minimum rings were provisioned by the PF, don't
10787                  * reserve rings by default when the device is down.
10788                  */
10789                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10790                         return true;
10791
10792                 if (!netif_running(bp->dev))
10793                         return false;
10794         }
10795 #endif
10796         return true;
10797 }
10798
10799 /* If the chip and firmware support RFS */
10800 static bool bnxt_rfs_supported(struct bnxt *bp)
10801 {
10802         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10803                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10804                         return true;
10805                 return false;
10806         }
10807         /* 212 firmware is broken for aRFS */
10808         if (BNXT_FW_MAJ(bp) == 212)
10809                 return false;
10810         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10811                 return true;
10812         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10813                 return true;
10814         return false;
10815 }
10816
10817 /* If runtime conditions support RFS */
10818 static bool bnxt_rfs_capable(struct bnxt *bp)
10819 {
10820 #ifdef CONFIG_RFS_ACCEL
10821         int vnics, max_vnics, max_rss_ctxs;
10822
10823         if (bp->flags & BNXT_FLAG_CHIP_P5)
10824                 return bnxt_rfs_supported(bp);
10825         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10826                 return false;
10827
10828         vnics = 1 + bp->rx_nr_rings;
10829         max_vnics = bnxt_get_max_func_vnics(bp);
10830         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10831
10832         /* RSS contexts not a limiting factor */
10833         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10834                 max_rss_ctxs = max_vnics;
10835         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10836                 if (bp->rx_nr_rings > 1)
10837                         netdev_warn(bp->dev,
10838                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10839                                     min(max_rss_ctxs - 1, max_vnics - 1));
10840                 return false;
10841         }
10842
10843         if (!BNXT_NEW_RM(bp))
10844                 return true;
10845
10846         if (vnics == bp->hw_resc.resv_vnics)
10847                 return true;
10848
10849         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10850         if (vnics <= bp->hw_resc.resv_vnics)
10851                 return true;
10852
10853         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10854         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10855         return false;
10856 #else
10857         return false;
10858 #endif
10859 }
10860
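/* ndo_fix_features handler: resolve conflicts between requested features
 * (GRO_HW vs LRO, RX CTAG/STAG VLAN coupling, NTUPLE vs RFS capability).
 */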
10861 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10862                                            netdev_features_t features)
10863 {
10864         struct bnxt *bp = netdev_priv(dev);
10865         netdev_features_t vlan_features;
10866
10867         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10868                 features &= ~NETIF_F_NTUPLE;
10869
10870         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10871                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10872
10873         if (!(features & NETIF_F_GRO))
10874                 features &= ~NETIF_F_GRO_HW;
10875
10876         if (features & NETIF_F_GRO_HW)
10877                 features &= ~NETIF_F_LRO;
10878
10879         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10880          * turned on or off together.
10881          */
10882         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10883         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10884                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10885                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10886                 else if (vlan_features)
10887                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10888         }
10889 #ifdef CONFIG_BNXT_SRIOV
10890         if (BNXT_VF(bp) && bp->vf.vlan)
10891                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10892 #endif
10893         return features;
10894 }
10895
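/* .ndo_set_features handler: translate the requested features into
 * bp->flags and apply them, reopening the NIC for changes that need a
 * full re-init and reprogramming TPA in place when that is the only
 * change.
 */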
10896 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10897 {
10898         struct bnxt *bp = netdev_priv(dev);
10899         u32 flags = bp->flags;
10900         u32 changes;
10901         int rc = 0;
10902         bool re_init = false;
10903         bool update_tpa = false;
10904
10905         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10906         if (features & NETIF_F_GRO_HW)
10907                 flags |= BNXT_FLAG_GRO;
10908         else if (features & NETIF_F_LRO)
10909                 flags |= BNXT_FLAG_LRO;
10910
10911         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10912                 flags &= ~BNXT_FLAG_TPA;
10913
10914         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10915                 flags |= BNXT_FLAG_STRIP_VLAN;
10916
10917         if (features & NETIF_F_NTUPLE)
10918                 flags |= BNXT_FLAG_RFS;
10919
10920         changes = flags ^ bp->flags;
10921         if (changes & BNXT_FLAG_TPA) {
10922                 update_tpa = true;
10923                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10924                     (flags & BNXT_FLAG_TPA) == 0 ||
10925                     (bp->flags & BNXT_FLAG_CHIP_P5))
10926                         re_init = true;
10927         }
10928
10929         if (changes & ~BNXT_FLAG_TPA)
10930                 re_init = true;
10931
10932         if (flags != bp->flags) {
10933                 u32 old_flags = bp->flags;
10934
10935                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10936                         bp->flags = flags;
10937                         if (update_tpa)
10938                                 bnxt_set_ring_params(bp);
10939                         return rc;
10940                 }
10941
10942                 if (re_init) {
10943                         bnxt_close_nic(bp, false, false);
10944                         bp->flags = flags;
10945                         if (update_tpa)
10946                                 bnxt_set_ring_params(bp);
10947
10948                         return bnxt_open_nic(bp, false, false);
10949                 }
10950                 if (update_tpa) {
10951                         bp->flags = flags;
10952                         rc = bnxt_set_tpa(bp,
10953                                           !!(flags & BNXT_FLAG_TPA));
10955                         if (rc)
10956                                 bp->flags = old_flags;
10957                 }
10958         }
10959         return rc;
10960 }
10961
10962 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10963                               u8 **nextp)
10964 {
10965         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10966         int hdr_count = 0;
10967         u8 *nexthdr;
10968         int start;
10969
10970         /* Check that there are at most 2 IPv6 extension headers, no
10971          * fragment header, and each is <= 64 bytes.
10972          */
10973         start = nw_off + sizeof(*ip6h);
10974         nexthdr = &ip6h->nexthdr;
10975         while (ipv6_ext_hdr(*nexthdr)) {
10976                 struct ipv6_opt_hdr *hp;
10977                 int hdrlen;
10978
10979                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10980                     *nexthdr == NEXTHDR_FRAGMENT)
10981                         return false;
10982                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10983                                           skb_headlen(skb), NULL);
10984                 if (!hp)
10985                         return false;
10986                 if (*nexthdr == NEXTHDR_AUTH)
10987                         hdrlen = ipv6_authlen(hp);
10988                 else
10989                         hdrlen = ipv6_optlen(hp);
10990
10991                 if (hdrlen > 64)
10992                         return false;
10993                 nexthdr = &hp->nexthdr;
10994                 start += hdrlen;
10995                 hdr_count++;
10996         }
10997         if (nextp) {
10998                 /* Caller will check inner protocol */
10999                 if (skb->encapsulation) {
11000                         *nextp = nexthdr;
11001                         return true;
11002                 }
11003                 *nextp = NULL;
11004         }
11005         /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11006         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11007 }
11008
11009 /* For UDP, we can only handle one VXLAN port and one Geneve port. */
11010 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11011 {
11012         struct udphdr *uh = udp_hdr(skb);
11013         __be16 udp_port = uh->dest;
11014
11015         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11016                 return false;
11017         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11018                 struct ethhdr *eh = inner_eth_hdr(skb);
11019
11020                 switch (eh->h_proto) {
11021                 case htons(ETH_P_IP):
11022                         return true;
11023                 case htons(ETH_P_IPV6):
11024                         return bnxt_exthdr_check(bp, skb,
11025                                                  skb_inner_network_offset(skb),
11026                                                  NULL);
11027                 }
11028         }
11029         return false;
11030 }
11031
11032 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11033 {
11034         switch (l4_proto) {
11035         case IPPROTO_UDP:
11036                 return bnxt_udp_tunl_check(bp, skb);
11037         case IPPROTO_IPIP:
11038                 return true;
11039         case IPPROTO_GRE: {
11040                 switch (skb->inner_protocol) {
11041                 default:
11042                         return false;
11043                 case htons(ETH_P_IP):
11044                         return true;
11045                 case htons(ETH_P_IPV6):
11046                         fallthrough;
11047                 }
11048         }
11049         case IPPROTO_IPV6:
11050                 /* Check ext headers of inner ipv6 */
11051                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11052                                          NULL);
11053         }
11054         return false;
11055 }
11056
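/* .ndo_features_check handler: for encapsulated packets that the
 * hardware cannot parse (unsupported tunnel types or too many IPv6
 * extension headers), turn off checksum and GSO offloads for this skb.
 */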
11057 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11058                                              struct net_device *dev,
11059                                              netdev_features_t features)
11060 {
11061         struct bnxt *bp = netdev_priv(dev);
11062         u8 *l4_proto;
11063
11064         features = vlan_features_check(skb, features);
11065         switch (vlan_get_protocol(skb)) {
11066         case htons(ETH_P_IP):
11067                 if (!skb->encapsulation)
11068                         return features;
11069                 l4_proto = &ip_hdr(skb)->protocol;
11070                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11071                         return features;
11072                 break;
11073         case htons(ETH_P_IPV6):
11074                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11075                                        &l4_proto))
11076                         break;
11077                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11078                         return features;
11079                 break;
11080         }
11081         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11082 }
11083
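/* Read @num_words 32-bit words of register space starting at @reg_off
 * into @reg_buf, using the HWRM_DBG_READ_DIRECT firmware command and a
 * temporary DMA buffer.  A hypothetical caller might look like:
 *
 *	u32 buf[2];
 *
 *	if (!bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 2, buf))
 *		pr_debug("regs: %x %x\n", buf[0], buf[1]);
 */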
11084 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11085                          u32 *reg_buf)
11086 {
11087         struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
11088         struct hwrm_dbg_read_direct_input req = {0};
11089         __le32 *dbg_reg_buf;
11090         dma_addr_t mapping;
11091         int rc, i;
11092
11093         dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
11094                                          &mapping, GFP_KERNEL);
11095         if (!dbg_reg_buf)
11096                 return -ENOMEM;
11097         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
11098         req.host_dest_addr = cpu_to_le64(mapping);
11099         req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11100         req.read_len32 = cpu_to_le32(num_words);
11101         mutex_lock(&bp->hwrm_cmd_lock);
11102         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11103         if (rc || resp->error_code) {
11104                 rc = -EIO;
11105                 goto dbg_rd_reg_exit;
11106         }
11107         for (i = 0; i < num_words; i++)
11108                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11109
11110 dbg_rd_reg_exit:
11111         mutex_unlock(&bp->hwrm_cmd_lock);
11112         dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
11113         return rc;
11114 }
11115
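/* Ask firmware for the current producer and consumer indices of the
 * given firmware ring, for debug purposes.
 */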
11116 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11117                                        u32 ring_id, u32 *prod, u32 *cons)
11118 {
11119         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
11120         struct hwrm_dbg_ring_info_get_input req = {0};
11121         int rc;
11122
11123         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
11124         req.ring_type = ring_type;
11125         req.fw_ring_id = cpu_to_le32(ring_id);
11126         mutex_lock(&bp->hwrm_cmd_lock);
11127         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11128         if (!rc) {
11129                 *prod = le32_to_cpu(resp->producer_index);
11130                 *cons = le32_to_cpu(resp->consumer_index);
11131         }
11132         mutex_unlock(&bp->hwrm_cmd_lock);
11133         return rc;
11134 }
11135
11136 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11137 {
11138         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11139         int i = bnapi->index;
11140
11141         if (!txr)
11142                 return;
11143
11144         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11145                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11146                     txr->tx_cons);
11147 }
11148
11149 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11150 {
11151         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11152         int i = bnapi->index;
11153
11154         if (!rxr)
11155                 return;
11156
11157         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11158                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11159                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11160                     rxr->rx_sw_agg_prod);
11161 }
11162
11163 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11164 {
11165         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11166         int i = bnapi->index;
11167
11168         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11169                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11170 }
11171
11172 static void bnxt_dbg_dump_states(struct bnxt *bp)
11173 {
11174         int i;
11175         struct bnxt_napi *bnapi;
11176
11177         for (i = 0; i < bp->cp_nr_rings; i++) {
11178                 bnapi = bp->bnapi[i];
11179                 if (netif_msg_drv(bp)) {
11180                         bnxt_dump_tx_sw_state(bnapi);
11181                         bnxt_dump_rx_sw_state(bnapi);
11182                         bnxt_dump_cp_sw_state(bnapi);
11183                 }
11184         }
11185 }
11186
11187 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11188 {
11189         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11190         struct hwrm_ring_reset_input req = {0};
11191         struct bnxt_napi *bnapi = rxr->bnapi;
11192         struct bnxt_cp_ring_info *cpr;
11193         u16 cp_ring_id;
11194
11195         cpr = &bnapi->cp_ring;
11196         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11197         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11198         req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11199         req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11200         return hwrm_send_message_silent(bp, &req, sizeof(req),
11201                                         HWRM_CMD_TIMEOUT);
11202 }
11203
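/* Recover the NIC with a close/open cycle.  A full (non-silent) reset
 * dumps ring state (when NETIF_MSG_DRV is enabled), stops ULP around
 * the close/open and re-initializes IRQs; a silent reset is a quick
 * close/open without either.
 */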
11204 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11205 {
11206         if (!silent)
11207                 bnxt_dbg_dump_states(bp);
11208         if (netif_running(bp->dev)) {
11209                 int rc;
11210
11211                 if (silent) {
11212                         bnxt_close_nic(bp, false, false);
11213                         bnxt_open_nic(bp, false, false);
11214                 } else {
11215                         bnxt_ulp_stop(bp);
11216                         bnxt_close_nic(bp, true, false);
11217                         rc = bnxt_open_nic(bp, true, false);
11218                         bnxt_ulp_start(bp, rc);
11219                 }
11220         }
11221 }
11222
11223 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11224 {
11225         struct bnxt *bp = netdev_priv(dev);
11226
11227         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11228         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11229         bnxt_queue_sp_work(bp);
11230 }
11231
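/* Watch the firmware heartbeat and reset counter registers and schedule
 * firmware exception handling if the heartbeat stalls or an unexpected
 * firmware reset is detected.  Runs from bnxt_timer(), throttled by
 * tmr_counter.
 */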
11232 static void bnxt_fw_health_check(struct bnxt *bp)
11233 {
11234         struct bnxt_fw_health *fw_health = bp->fw_health;
11235         u32 val;
11236
11237         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11238                 return;
11239
11240         if (fw_health->tmr_counter) {
11241                 fw_health->tmr_counter--;
11242                 return;
11243         }
11244
11245         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11246         if (val == fw_health->last_fw_heartbeat)
11247                 goto fw_reset;
11248
11249         fw_health->last_fw_heartbeat = val;
11250
11251         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11252         if (val != fw_health->last_fw_reset_cnt)
11253                 goto fw_reset;
11254
11255         fw_health->tmr_counter = fw_health->tmr_multiplier;
11256         return;
11257
11258 fw_reset:
11259         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11260         bnxt_queue_sp_work(bp);
11261 }
11262
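/* Periodic housekeeping timer.  All real work (stats, flow stats, NTP
 * filter aging, PHY retries, missed-IRQ checks) is deferred to
 * bnxt_sp_task() via sp_event bits.
 */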
11263 static void bnxt_timer(struct timer_list *t)
11264 {
11265         struct bnxt *bp = from_timer(bp, t, timer);
11266         struct net_device *dev = bp->dev;
11267
11268         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11269                 return;
11270
11271         if (atomic_read(&bp->intr_sem) != 0)
11272                 goto bnxt_restart_timer;
11273
11274         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11275                 bnxt_fw_health_check(bp);
11276
11277         if (bp->link_info.link_up && bp->stats_coal_ticks) {
11278                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11279                 bnxt_queue_sp_work(bp);
11280         }
11281
11282         if (bnxt_tc_flower_enabled(bp)) {
11283                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11284                 bnxt_queue_sp_work(bp);
11285         }
11286
11287 #ifdef CONFIG_RFS_ACCEL
11288         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11289                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11290                 bnxt_queue_sp_work(bp);
11291         }
11292 #endif /*CONFIG_RFS_ACCEL*/
11293
11294         if (bp->link_info.phy_retry) {
11295                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11296                         bp->link_info.phy_retry = false;
11297                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11298                 } else {
11299                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11300                         bnxt_queue_sp_work(bp);
11301                 }
11302         }
11303
11304         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11305             netif_carrier_ok(dev)) {
11306                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11307                 bnxt_queue_sp_work(bp);
11308         }
11309 bnxt_restart_timer:
11310         mod_timer(&bp->timer, jiffies + bp->current_interval);
11311 }
11312
11313 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11314 {
11315         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11316          * set.  If the device is being closed, bnxt_close() may be holding
11317          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
11318          * So we must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
11319          */
11320         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11321         rtnl_lock();
11322 }
11323
11324 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11325 {
11326         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11327         rtnl_unlock();
11328 }
11329
11330 /* Only called from bnxt_sp_task() */
11331 static void bnxt_reset(struct bnxt *bp, bool silent)
11332 {
11333         bnxt_rtnl_lock_sp(bp);
11334         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11335                 bnxt_reset_task(bp, silent);
11336         bnxt_rtnl_unlock_sp(bp);
11337 }
11338
11339 /* Only called from bnxt_sp_task() */
11340 static void bnxt_rx_ring_reset(struct bnxt *bp)
11341 {
11342         int i;
11343
11344         bnxt_rtnl_lock_sp(bp);
11345         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11346                 bnxt_rtnl_unlock_sp(bp);
11347                 return;
11348         }
11349         /* Disable and flush TPA before resetting the RX ring */
11350         if (bp->flags & BNXT_FLAG_TPA)
11351                 bnxt_set_tpa(bp, false);
11352         for (i = 0; i < bp->rx_nr_rings; i++) {
11353                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11354                 struct bnxt_cp_ring_info *cpr;
11355                 int rc;
11356
11357                 if (!rxr->bnapi->in_reset)
11358                         continue;
11359
11360                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11361                 if (rc) {
11362                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11363                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11364                         else
11365                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11366                                             rc);
11367                         bnxt_reset_task(bp, true);
11368                         break;
11369                 }
11370                 bnxt_free_one_rx_ring_skbs(bp, i);
11371                 rxr->rx_prod = 0;
11372                 rxr->rx_agg_prod = 0;
11373                 rxr->rx_sw_agg_prod = 0;
11374                 rxr->rx_next_cons = 0;
11375                 rxr->bnapi->in_reset = false;
11376                 bnxt_alloc_one_rx_ring(bp, i);
11377                 cpr = &rxr->bnapi->cp_ring;
11378                 cpr->sw_stats.rx.rx_resets++;
11379                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11380                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11381                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11382         }
11383         if (bp->flags & BNXT_FLAG_TPA)
11384                 bnxt_set_tpa(bp, true);
11385         bnxt_rtnl_unlock_sp(bp);
11386 }
11387
11388 static void bnxt_fw_reset_close(struct bnxt *bp)
11389 {
11390         bnxt_ulp_stop(bp);
11391         /* When firmware is in fatal state, quiesce device and disable
11392          * bus master to prevent any potential bad DMAs before freeing
11393          * kernel memory.
11394          */
11395         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11396                 u16 val = 0;
11397
11398                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11399                 if (val == 0xffff)
11400                         bp->fw_reset_min_dsecs = 0;
11401                 bnxt_tx_disable(bp);
11402                 bnxt_disable_napi(bp);
11403                 bnxt_disable_int_sync(bp);
11404                 bnxt_free_irq(bp);
11405                 bnxt_clear_int_mode(bp);
11406                 pci_disable_device(bp->pdev);
11407         }
11408         bnxt_ptp_clear(bp);
11409         __bnxt_close_nic(bp, true, false);
11410         bnxt_vf_reps_free(bp);
11411         bnxt_clear_int_mode(bp);
11412         bnxt_hwrm_func_drv_unrgtr(bp);
11413         if (pci_is_enabled(bp->pdev))
11414                 pci_disable_device(bp->pdev);
11415         bnxt_free_ctx_mem(bp);
11416         kfree(bp->ctx);
11417         bp->ctx = NULL;
11418 }
11419
11420 static bool is_bnxt_fw_ok(struct bnxt *bp)
11421 {
11422         struct bnxt_fw_health *fw_health = bp->fw_health;
11423         bool no_heartbeat = false, has_reset = false;
11424         u32 val;
11425
11426         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11427         if (val == fw_health->last_fw_heartbeat)
11428                 no_heartbeat = true;
11429
11430         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11431         if (val != fw_health->last_fw_reset_cnt)
11432                 has_reset = true;
11433
11434         if (!no_heartbeat && has_reset)
11435                 return true;
11436
11437         return false;
11438 }
11439
11440 /* rtnl_lock is acquired before calling this function */
11441 static void bnxt_force_fw_reset(struct bnxt *bp)
11442 {
11443         struct bnxt_fw_health *fw_health = bp->fw_health;
11444         u32 wait_dsecs;
11445
11446         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11447             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11448                 return;
11449
11450         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11451         bnxt_fw_reset_close(bp);
11452         wait_dsecs = fw_health->master_func_wait_dsecs;
11453         if (fw_health->master) {
11454                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11455                         wait_dsecs = 0;
11456                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11457         } else {
11458                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11459                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11460                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11461         }
11462
11463         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11464         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11465         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11466 }
11467
11468 void bnxt_fw_exception(struct bnxt *bp)
11469 {
11470         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11471         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11472         bnxt_rtnl_lock_sp(bp);
11473         bnxt_force_fw_reset(bp);
11474         bnxt_rtnl_unlock_sp(bp);
11475 }
11476
11477 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11478  * < 0 on error.
11479  */
11480 static int bnxt_get_registered_vfs(struct bnxt *bp)
11481 {
11482 #ifdef CONFIG_BNXT_SRIOV
11483         int rc;
11484
11485         if (!BNXT_PF(bp))
11486                 return 0;
11487
11488         rc = bnxt_hwrm_func_qcfg(bp);
11489         if (rc) {
11490                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11491                 return rc;
11492         }
11493         if (bp->pf.registered_vfs)
11494                 return bp->pf.registered_vfs;
11495         if (bp->sriov_cfg)
11496                 return 1;
11497 #endif
11498         return 0;
11499 }
11500
11501 void bnxt_fw_reset(struct bnxt *bp)
11502 {
11503         bnxt_rtnl_lock_sp(bp);
11504         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11505             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11506                 int n = 0, tmo;
11507
11508                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11509                 if (bp->pf.active_vfs &&
11510                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11511                         n = bnxt_get_registered_vfs(bp);
11512                 if (n < 0) {
11513                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11514                                    n);
11515                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11516                         dev_close(bp->dev);
11517                         goto fw_reset_exit;
11518                 } else if (n > 0) {
11519                         u16 vf_tmo_dsecs = n * 10;
11520
11521                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11522                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11523                         bp->fw_reset_state =
11524                                 BNXT_FW_RESET_STATE_POLL_VF;
11525                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11526                         goto fw_reset_exit;
11527                 }
11528                 bnxt_fw_reset_close(bp);
11529                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11530                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11531                         tmo = HZ / 10;
11532                 } else {
11533                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11534                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11535                 }
11536                 bnxt_queue_fw_reset_work(bp, tmo);
11537         }
11538 fw_reset_exit:
11539         bnxt_rtnl_unlock_sp(bp);
11540 }
11541
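/* On P5 chips, look for completion rings that have work pending but
 * whose consumer index has not moved since the last check, and count
 * them as missed IRQs.
 */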
11542 static void bnxt_chk_missed_irq(struct bnxt *bp)
11543 {
11544         int i;
11545
11546         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11547                 return;
11548
11549         for (i = 0; i < bp->cp_nr_rings; i++) {
11550                 struct bnxt_napi *bnapi = bp->bnapi[i];
11551                 struct bnxt_cp_ring_info *cpr;
11552                 u32 fw_ring_id;
11553                 int j;
11554
11555                 if (!bnapi)
11556                         continue;
11557
11558                 cpr = &bnapi->cp_ring;
11559                 for (j = 0; j < 2; j++) {
11560                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11561                         u32 val[2];
11562
11563                         if (!cpr2 || cpr2->has_more_work ||
11564                             !bnxt_has_work(bp, cpr2))
11565                                 continue;
11566
11567                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11568                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11569                                 continue;
11570                         }
11571                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11572                         bnxt_dbg_hwrm_ring_info_get(bp,
11573                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11574                                 fw_ring_id, &val[0], &val[1]);
11575                         cpr->sw_stats.cmn.missed_irqs++;
11576                 }
11577         }
11578 }
11579
11580 static void bnxt_cfg_ntp_filters(struct bnxt *);
11581
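/* Derive the initial ethtool link settings (autoneg, advertised speeds,
 * forced speed/duplex and flow control) from the firmware-reported PHY
 * configuration.
 */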
11582 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11583 {
11584         struct bnxt_link_info *link_info = &bp->link_info;
11585
11586         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11587                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11588                 if (bp->hwrm_spec_code >= 0x10201) {
11589                         if (link_info->auto_pause_setting &
11590                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11591                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11592                 } else {
11593                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11594                 }
11595                 link_info->advertising = link_info->auto_link_speeds;
11596                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11597         } else {
11598                 link_info->req_link_speed = link_info->force_link_speed;
11599                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11600                 if (link_info->force_pam4_link_speed) {
11601                         link_info->req_link_speed =
11602                                 link_info->force_pam4_link_speed;
11603                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11604                 }
11605                 link_info->req_duplex = link_info->duplex_setting;
11606         }
11607         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11608                 link_info->req_flow_ctrl =
11609                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11610         else
11611                 link_info->req_flow_ctrl = link_info->force_pause_setting;
11612 }
11613
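/* Answer a firmware echo request by reflecting event_data1/2 back to
 * the firmware in an HWRM_FUNC_ECHO_RESPONSE command.
 */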
11614 static void bnxt_fw_echo_reply(struct bnxt *bp)
11615 {
11616         struct bnxt_fw_health *fw_health = bp->fw_health;
11617         struct hwrm_func_echo_response_input req = {0};
11618
11619         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11620         req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11621         req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11622         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11623 }
11624
11625 static void bnxt_sp_task(struct work_struct *work)
11626 {
11627         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11628
11629         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11630         smp_mb__after_atomic();
11631         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11632                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11633                 return;
11634         }
11635
11636         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11637                 bnxt_cfg_rx_mode(bp);
11638
11639         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11640                 bnxt_cfg_ntp_filters(bp);
11641         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11642                 bnxt_hwrm_exec_fwd_req(bp);
11643         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11644                 bnxt_hwrm_port_qstats(bp, 0);
11645                 bnxt_hwrm_port_qstats_ext(bp, 0);
11646                 bnxt_accumulate_all_stats(bp);
11647         }
11648
11649         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11650                 int rc;
11651
11652                 mutex_lock(&bp->link_lock);
11653                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11654                                        &bp->sp_event))
11655                         bnxt_hwrm_phy_qcaps(bp);
11656
11657                 rc = bnxt_update_link(bp, true);
11658                 if (rc)
11659                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11660                                    rc);
11661
11662                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11663                                        &bp->sp_event))
11664                         bnxt_init_ethtool_link_settings(bp);
11665                 mutex_unlock(&bp->link_lock);
11666         }
11667         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11668                 int rc;
11669
11670                 mutex_lock(&bp->link_lock);
11671                 rc = bnxt_update_phy_setting(bp);
11672                 mutex_unlock(&bp->link_lock);
11673                 if (rc) {
11674                         netdev_warn(bp->dev, "update phy settings retry failed\n");
11675                 } else {
11676                         bp->link_info.phy_retry = false;
11677                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
11678                 }
11679         }
11680         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11681                 mutex_lock(&bp->link_lock);
11682                 bnxt_get_port_module_status(bp);
11683                 mutex_unlock(&bp->link_lock);
11684         }
11685
11686         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11687                 bnxt_tc_flow_stats_work(bp);
11688
11689         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11690                 bnxt_chk_missed_irq(bp);
11691
11692         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11693                 bnxt_fw_echo_reply(bp);
11694
11695         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
11696          * must be the last functions to be called before exiting.
11697          */
11698         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11699                 bnxt_reset(bp, false);
11700
11701         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11702                 bnxt_reset(bp, true);
11703
11704         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11705                 bnxt_rx_ring_reset(bp);
11706
11707         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11708                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11709
11710         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11711                 if (!is_bnxt_fw_ok(bp))
11712                         bnxt_devlink_health_report(bp,
11713                                                    BNXT_FW_EXCEPTION_SP_EVENT);
11714         }
11715
11716         smp_mb__before_atomic();
11717         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11718 }
11719
11720 /* Under rtnl_lock */
11721 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11722                      int tx_xdp)
11723 {
11724         int max_rx, max_tx, tx_sets = 1;
11725         int tx_rings_needed, stats;
11726         int rx_rings = rx;
11727         int cp, vnics, rc;
11728
11729         if (tcs)
11730                 tx_sets = tcs;
11731
11732         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11733         if (rc)
11734                 return rc;
11735
11736         if (max_rx < rx)
11737                 return -ENOMEM;
11738
11739         tx_rings_needed = tx * tx_sets + tx_xdp;
11740         if (max_tx < tx_rings_needed)
11741                 return -ENOMEM;
11742
11743         vnics = 1;
11744         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11745                 vnics += rx_rings;
11746
11747         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11748                 rx_rings <<= 1;
11749         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11750         stats = cp;
11751         if (BNXT_NEW_RM(bp)) {
11752                 cp += bnxt_get_ulp_msix_num(bp);
11753                 stats += bnxt_get_ulp_stat_ctxs(bp);
11754         }
11755         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11756                                      stats, vnics);
11757 }
11758
11759 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11760 {
11761         if (bp->bar2) {
11762                 pci_iounmap(pdev, bp->bar2);
11763                 bp->bar2 = NULL;
11764         }
11765
11766         if (bp->bar1) {
11767                 pci_iounmap(pdev, bp->bar1);
11768                 bp->bar1 = NULL;
11769         }
11770
11771         if (bp->bar0) {
11772                 pci_iounmap(pdev, bp->bar0);
11773                 bp->bar0 = NULL;
11774         }
11775 }
11776
11777 static void bnxt_cleanup_pci(struct bnxt *bp)
11778 {
11779         bnxt_unmap_bars(bp, bp->pdev);
11780         pci_release_regions(bp->pdev);
11781         if (pci_is_enabled(bp->pdev))
11782                 pci_disable_device(bp->pdev);
11783 }
11784
11785 static void bnxt_init_dflt_coal(struct bnxt *bp)
11786 {
11787         struct bnxt_coal *coal;
11788
11789         /* Tick values in microseconds.
11790          * 1 coal_buf x bufs_per_record = 1 completion record.
11791          */
11792         coal = &bp->rx_coal;
11793         coal->coal_ticks = 10;
11794         coal->coal_bufs = 30;
11795         coal->coal_ticks_irq = 1;
11796         coal->coal_bufs_irq = 2;
11797         coal->idle_thresh = 50;
11798         coal->bufs_per_record = 2;
11799         coal->budget = 64;              /* NAPI budget */
11800
11801         coal = &bp->tx_coal;
11802         coal->coal_ticks = 28;
11803         coal->coal_bufs = 30;
11804         coal->coal_ticks_irq = 2;
11805         coal->coal_bufs_irq = 2;
11806         coal->bufs_per_record = 1;
11807
11808         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11809 }
11810
11811 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11812 {
11813         int rc;
11814
11815         bp->fw_cap = 0;
11816         rc = bnxt_hwrm_ver_get(bp);
11817         bnxt_try_map_fw_health_reg(bp);
11818         if (rc) {
11819                 rc = bnxt_try_recover_fw(bp);
11820                 if (rc)
11821                         return rc;
11822                 rc = bnxt_hwrm_ver_get(bp);
11823                 if (rc)
11824                         return rc;
11825         }
11826
11827         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11828                 rc = bnxt_alloc_kong_hwrm_resources(bp);
11829                 if (rc)
11830                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11831         }
11832
11833         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11834             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11835                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11836                 if (rc)
11837                         return rc;
11838         }
11839         bnxt_nvm_cfg_ver_get(bp);
11840
11841         rc = bnxt_hwrm_func_reset(bp);
11842         if (rc)
11843                 return -ENODEV;
11844
11845         bnxt_hwrm_fw_set_time(bp);
11846         return 0;
11847 }
11848
11849 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11850 {
11851         int rc;
11852
11853         /* Get the MAX capabilities for this function */
11854         rc = bnxt_hwrm_func_qcaps(bp);
11855         if (rc) {
11856                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11857                            rc);
11858                 return -ENODEV;
11859         }
11860
11861         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11862         if (rc)
11863                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11864                             rc);
11865
11866         if (bnxt_alloc_fw_health(bp)) {
11867                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11868         } else {
11869                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11870                 if (rc)
11871                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11872                                     rc);
11873         }
11874
11875         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11876         if (rc)
11877                 return -ENODEV;
11878
11879         bnxt_hwrm_func_qcfg(bp);
11880         bnxt_hwrm_vnic_qcaps(bp);
11881         bnxt_hwrm_port_led_qcaps(bp);
11882         bnxt_ethtool_init(bp);
11883         bnxt_dcb_init(bp);
11884         return 0;
11885 }
11886
11887 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11888 {
11889         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11890         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11891                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11892                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11893                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11894         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11895                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11896                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11897                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11898         }
11899 }
11900
11901 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11902 {
11903         struct net_device *dev = bp->dev;
11904
11905         dev->hw_features &= ~NETIF_F_NTUPLE;
11906         dev->features &= ~NETIF_F_NTUPLE;
11907         bp->flags &= ~BNXT_FLAG_RFS;
11908         if (bnxt_rfs_supported(bp)) {
11909                 dev->hw_features |= NETIF_F_NTUPLE;
11910                 if (bnxt_rfs_capable(bp)) {
11911                         bp->flags |= BNXT_FLAG_RFS;
11912                         dev->features |= NETIF_F_NTUPLE;
11913                 }
11914         }
11915 }
11916
11917 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11918 {
11919         struct pci_dev *pdev = bp->pdev;
11920
11921         bnxt_set_dflt_rss_hash_type(bp);
11922         bnxt_set_dflt_rfs(bp);
11923
11924         bnxt_get_wol_settings(bp);
11925         if (bp->flags & BNXT_FLAG_WOL_CAP)
11926                 device_set_wakeup_enable(&pdev->dev, bp->wol);
11927         else
11928                 device_set_wakeup_capable(&pdev->dev, false);
11929
11930         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11931         bnxt_hwrm_coal_params_qcaps(bp);
11932 }
11933
11934 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11935
11936 static int bnxt_fw_init_one(struct bnxt *bp)
11937 {
11938         int rc;
11939
11940         rc = bnxt_fw_init_one_p1(bp);
11941         if (rc) {
11942                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11943                 return rc;
11944         }
11945         rc = bnxt_fw_init_one_p2(bp);
11946         if (rc) {
11947                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11948                 return rc;
11949         }
11950         rc = bnxt_probe_phy(bp, false);
11951         if (rc)
11952                 return rc;
11953         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11954         if (rc)
11955                 return rc;
11956
11957         /* Firmware capabilities may have changed, so destroy reporters
11958          * no longer supported and create ones for new capabilities.
11959          */
11960         bnxt_dl_fw_reporters_destroy(bp, false);
11961         bnxt_dl_fw_reporters_create(bp);
11962         bnxt_fw_init_one_p3(bp);
11963         return 0;
11964 }
11965
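/* Execute one step of the firmware-provided reset sequence: write the
 * value to a config space, GRC or BAR register, then optionally delay
 * (a config space read is used to flush the write before the delay).
 */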
11966 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11967 {
11968         struct bnxt_fw_health *fw_health = bp->fw_health;
11969         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11970         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11971         u32 reg_type, reg_off, delay_msecs;
11972
11973         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11974         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11975         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11976         switch (reg_type) {
11977         case BNXT_FW_HEALTH_REG_TYPE_CFG:
11978                 pci_write_config_dword(bp->pdev, reg_off, val);
11979                 break;
11980         case BNXT_FW_HEALTH_REG_TYPE_GRC:
11981                 writel(reg_off & BNXT_GRC_BASE_MASK,
11982                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11983                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11984                 fallthrough;
11985         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11986                 writel(val, bp->bar0 + reg_off);
11987                 break;
11988         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11989                 writel(val, bp->bar1 + reg_off);
11990                 break;
11991         }
11992         if (delay_msecs) {
11993                 pci_read_config_dword(bp->pdev, 0, &val);
11994                 msleep(delay_msecs);
11995         }
11996 }
11997
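/* Reset the chip using whichever mechanism applies: OP-TEE based reset,
 * the host register write sequence from ERROR_RECOVERY_QCFG, or an
 * HWRM_FW_RESET request to the firmware processor.
 */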
11998 static void bnxt_reset_all(struct bnxt *bp)
11999 {
12000         struct bnxt_fw_health *fw_health = bp->fw_health;
12001         int i, rc;
12002
12003         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12004                 bnxt_fw_reset_via_optee(bp);
12005                 bp->fw_reset_timestamp = jiffies;
12006                 return;
12007         }
12008
12009         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12010                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12011                         bnxt_fw_reset_writel(bp, i);
12012         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12013                 struct hwrm_fw_reset_input req = {0};
12014
12015                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
12016                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
12017                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12018                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12019                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12020                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
12021                 if (rc != -ENODEV)
12022                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12023         }
12024         bp->fw_reset_timestamp = jiffies;
12025 }
12026
12027 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12028 {
12029         return time_after(jiffies, bp->fw_reset_timestamp +
12030                           (bp->fw_reset_max_dsecs * HZ / 10));
12031 }
12032
12033 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12034 {
12035         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12036         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12037                 bnxt_ulp_start(bp, rc);
12038                 bnxt_dl_health_status_update(bp, false);
12039         }
12040         bp->fw_reset_state = 0;
12041         dev_close(bp->dev);
12042 }
12043
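/* Delayed-work state machine that drives recovery through a firmware
 * reset: wait for VFs to unregister, wait for the firmware to shut down
 * and (if needed) trigger the reset, then re-enable the PCI device,
 * poll for firmware to come back and finally reopen the netdev.
 */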
12044 static void bnxt_fw_reset_task(struct work_struct *work)
12045 {
12046         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12047         int rc = 0;
12048
12049         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12050                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12051                 return;
12052         }
12053
12054         switch (bp->fw_reset_state) {
12055         case BNXT_FW_RESET_STATE_POLL_VF: {
12056                 int n = bnxt_get_registered_vfs(bp);
12057                 int tmo;
12058
12059                 if (n < 0) {
12060                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12061                                    n, jiffies_to_msecs(jiffies -
12062                                    bp->fw_reset_timestamp));
12063                         goto fw_reset_abort;
12064                 } else if (n > 0) {
12065                         if (bnxt_fw_reset_timeout(bp)) {
12066                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12067                                 bp->fw_reset_state = 0;
12068                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12069                                            n);
12070                                 return;
12071                         }
12072                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12073                         return;
12074                 }
12075                 bp->fw_reset_timestamp = jiffies;
12076                 rtnl_lock();
12077                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12078                         bnxt_fw_reset_abort(bp, rc);
12079                         rtnl_unlock();
12080                         return;
12081                 }
12082                 bnxt_fw_reset_close(bp);
12083                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12084                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12085                         tmo = HZ / 10;
12086                 } else {
12087                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12088                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12089                 }
12090                 rtnl_unlock();
12091                 bnxt_queue_fw_reset_work(bp, tmo);
12092                 return;
12093         }
12094         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12095                 u32 val;
12096
12097                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12098                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12099                     !bnxt_fw_reset_timeout(bp)) {
12100                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12101                         return;
12102                 }
12103
12104                 if (!bp->fw_health->master) {
12105                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12106
12107                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12108                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12109                         return;
12110                 }
12111                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12112         }
12113                 fallthrough;
12114         case BNXT_FW_RESET_STATE_RESET_FW:
12115                 bnxt_reset_all(bp);
12116                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12117                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12118                 return;
12119         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12120                 bnxt_inv_fw_health_reg(bp);
12121                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12122                     !bp->fw_reset_min_dsecs) {
12123                         u16 val;
12124
12125                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12126                         if (val == 0xffff) {
12127                                 if (bnxt_fw_reset_timeout(bp)) {
12128                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12129                                         rc = -ETIMEDOUT;
12130                                         goto fw_reset_abort;
12131                                 }
12132                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12133                                 return;
12134                         }
12135                 }
12136                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12137                 if (pci_enable_device(bp->pdev)) {
12138                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12139                         rc = -ENODEV;
12140                         goto fw_reset_abort;
12141                 }
12142                 pci_set_master(bp->pdev);
12143                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12144                 fallthrough;
12145         case BNXT_FW_RESET_STATE_POLL_FW:
12146                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12147                 rc = __bnxt_hwrm_ver_get(bp, true);
12148                 if (rc) {
12149                         if (bnxt_fw_reset_timeout(bp)) {
12150                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12151                                 goto fw_reset_abort_status;
12152                         }
12153                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12154                         return;
12155                 }
12156                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12157                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12158                 fallthrough;
12159         case BNXT_FW_RESET_STATE_OPENING:
12160                 while (!rtnl_trylock()) {
12161                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12162                         return;
12163                 }
12164                 rc = bnxt_open(bp->dev);
12165                 if (rc) {
12166                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12167                         bnxt_fw_reset_abort(bp, rc);
12168                         rtnl_unlock();
12169                         return;
12170                 }
12171
12172                 bp->fw_reset_state = 0;
12173                 /* Make sure fw_reset_state is 0 before clearing the flag */
12174                 smp_mb__before_atomic();
12175                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12176                 bnxt_ulp_start(bp, 0);
12177                 bnxt_reenable_sriov(bp);
12178                 bnxt_vf_reps_alloc(bp);
12179                 bnxt_vf_reps_open(bp);
12180                 bnxt_dl_health_recovery_done(bp);
12181                 bnxt_dl_health_status_update(bp, true);
12182                 rtnl_unlock();
12183                 break;
12184         }
12185         return;
12186
12187 fw_reset_abort_status:
12188         if (bp->fw_health->status_reliable ||
12189             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12190                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12191
12192                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12193         }
12194 fw_reset_abort:
12195         rtnl_lock();
12196         bnxt_fw_reset_abort(bp, rc);
12197         rtnl_unlock();
12198 }
12199
12200 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12201 {
12202         int rc;
12203         struct bnxt *bp = netdev_priv(dev);
12204
12205         SET_NETDEV_DEV(dev, &pdev->dev);
12206
12207         /* enable device (incl. PCI PM wakeup) and bus-mastering */
12208         rc = pci_enable_device(pdev);
12209         if (rc) {
12210                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12211                 goto init_err;
12212         }
12213
12214         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12215                 dev_err(&pdev->dev,
12216                         "Cannot find PCI device base address, aborting\n");
12217                 rc = -ENODEV;
12218                 goto init_err_disable;
12219         }
12220
12221         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12222         if (rc) {
12223                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12224                 goto init_err_disable;
12225         }
12226
12227         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12228             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12229                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12230                 rc = -EIO;
12231                 goto init_err_release;
12232         }
12233
12234         pci_set_master(pdev);
12235
12236         bp->dev = dev;
12237         bp->pdev = pdev;
12238
12239         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12240          * determines the BAR size.
12241          */
12242         bp->bar0 = pci_ioremap_bar(pdev, 0);
12243         if (!bp->bar0) {
12244                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12245                 rc = -ENOMEM;
12246                 goto init_err_release;
12247         }
12248
12249         bp->bar2 = pci_ioremap_bar(pdev, 4);
12250         if (!bp->bar2) {
12251                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12252                 rc = -ENOMEM;
12253                 goto init_err_release;
12254         }
12255
12256         pci_enable_pcie_error_reporting(pdev);
12257
12258         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12259         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12260
12261         spin_lock_init(&bp->ntp_fltr_lock);
12262 #if BITS_PER_LONG == 32
12263         spin_lock_init(&bp->db_lock);
12264 #endif
12265
12266         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12267         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12268
12269         bnxt_init_dflt_coal(bp);
12270
12271         timer_setup(&bp->timer, bnxt_timer, 0);
12272         bp->current_interval = BNXT_TIMER_INTERVAL;
12273
12274         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12275         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12276
12277         clear_bit(BNXT_STATE_OPEN, &bp->state);
12278         return 0;
12279
12280 init_err_release:
12281         bnxt_unmap_bars(bp, pdev);
12282         pci_release_regions(pdev);
12283
12284 init_err_disable:
12285         pci_disable_device(pdev);
12286
12287 init_err:
12288         return rc;
12289 }
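/* A note on the unwind pattern above (descriptive only): the
 * init_err_* labels release resources in the reverse order of
 * acquisition (BAR mappings, then PCI regions, then the device
 * itself), the usual goto ladder for kernel probe error paths.
 */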
12290
12291 /* rtnl_lock held */
12292 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12293 {
12294         struct sockaddr *addr = p;
12295         struct bnxt *bp = netdev_priv(dev);
12296         int rc = 0;
12297
12298         if (!is_valid_ether_addr(addr->sa_data))
12299                 return -EADDRNOTAVAIL;
12300
12301         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12302                 return 0;
12303
12304         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12305         if (rc)
12306                 return rc;
12307
12308         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12309         if (netif_running(dev)) {
12310                 bnxt_close_nic(bp, false, false);
12311                 rc = bnxt_open_nic(bp, false, false);
12312         }
12313
12314         return rc;
12315 }
12316
12317 /* rtnl_lock held */
12318 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12319 {
12320         struct bnxt *bp = netdev_priv(dev);
12321
12322         if (netif_running(dev))
12323                 bnxt_close_nic(bp, true, false);
12324
12325         dev->mtu = new_mtu;
12326         bnxt_set_ring_params(bp);
12327
12328         if (netif_running(dev))
12329                 return bnxt_open_nic(bp, true, false);
12330
12331         return 0;
12332 }
12333
12334 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12335 {
12336         struct bnxt *bp = netdev_priv(dev);
12337         bool sh = false;
12338         int rc;
12339
12340         if (tc > bp->max_tc) {
12341                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12342                            tc, bp->max_tc);
12343                 return -EINVAL;
12344         }
12345
12346         if (netdev_get_num_tc(dev) == tc)
12347                 return 0;
12348
12349         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12350                 sh = true;
12351
12352         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12353                               sh, tc, bp->tx_nr_rings_xdp);
12354         if (rc)
12355                 return rc;
12356
12357         /* Need to close the device and re-allocate hw resources */
12358         if (netif_running(bp->dev))
12359                 bnxt_close_nic(bp, true, false);
12360
12361         if (tc) {
12362                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12363                 netdev_set_num_tc(dev, tc);
12364         } else {
12365                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12366                 netdev_reset_tc(dev);
12367         }
12368         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12369         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12370                                bp->tx_nr_rings + bp->rx_nr_rings;
12371
12372         if (netif_running(bp->dev))
12373                 return bnxt_open_nic(bp, true, false);
12374
12375         return 0;
12376 }
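/* Worked example (hypothetical values): with tx_nr_rings_per_tc = 4,
 * rx_nr_rings = 8, tx_nr_rings_xdp = 0 and tc = 2, the block above
 * yields tx_nr_rings = 4 * 2 = 8; with shared completion rings,
 * cp_nr_rings = max(8, 8) = 8, otherwise cp_nr_rings = 8 + 8 = 16.
 */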
12377
12378 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12379                                   void *cb_priv)
12380 {
12381         struct bnxt *bp = cb_priv;
12382
12383         if (!bnxt_tc_flower_enabled(bp) ||
12384             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12385                 return -EOPNOTSUPP;
12386
12387         switch (type) {
12388         case TC_SETUP_CLSFLOWER:
12389                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12390         default:
12391                 return -EOPNOTSUPP;
12392         }
12393 }
12394
12395 LIST_HEAD(bnxt_block_cb_list);
12396
12397 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12398                          void *type_data)
12399 {
12400         struct bnxt *bp = netdev_priv(dev);
12401
12402         switch (type) {
12403         case TC_SETUP_BLOCK:
12404                 return flow_block_cb_setup_simple(type_data,
12405                                                   &bnxt_block_cb_list,
12406                                                   bnxt_setup_tc_block_cb,
12407                                                   bp, bp, true);
12408         case TC_SETUP_QDISC_MQPRIO: {
12409                 struct tc_mqprio_qopt *mqprio = type_data;
12410
12411                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12412
12413                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12414         }
12415         default:
12416                 return -EOPNOTSUPP;
12417         }
12418 }
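/* Userspace reaches this handler through the mqprio qdisc; a typical
 * invocation (illustrative, device name assumed) is
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1
 *
 * With "hw 1", mqprio hands num_tc down via TC_SETUP_QDISC_MQPRIO and
 * the switch above routes it to bnxt_setup_mq_tc().
 */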
12419
12420 #ifdef CONFIG_RFS_ACCEL
12421 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12422                             struct bnxt_ntuple_filter *f2)
12423 {
12424         struct flow_keys *keys1 = &f1->fkeys;
12425         struct flow_keys *keys2 = &f2->fkeys;
12426
12427         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12428             keys1->basic.ip_proto != keys2->basic.ip_proto)
12429                 return false;
12430
12431         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12432                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12433                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12434                         return false;
12435         } else {
12436                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12437                            sizeof(keys1->addrs.v6addrs.src)) ||
12438                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12439                            sizeof(keys1->addrs.v6addrs.dst)))
12440                         return false;
12441         }
12442
12443         if (keys1->ports.ports == keys2->ports.ports &&
12444             keys1->control.flags == keys2->control.flags &&
12445             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12446             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12447                 return true;
12448
12449         return false;
12450 }
12451
12452 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12453                               u16 rxq_index, u32 flow_id)
12454 {
12455         struct bnxt *bp = netdev_priv(dev);
12456         struct bnxt_ntuple_filter *fltr, *new_fltr;
12457         struct flow_keys *fkeys;
12458         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12459         int rc = 0, idx, bit_id, l2_idx = 0;
12460         struct hlist_head *head;
12461         u32 flags;
12462
12463         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12464                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12465                 int off = 0, j;
12466
12467                 netif_addr_lock_bh(dev);
12468                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12469                         if (ether_addr_equal(eth->h_dest,
12470                                              vnic->uc_list + off)) {
12471                                 l2_idx = j + 1;
12472                                 break;
12473                         }
12474                 }
12475                 netif_addr_unlock_bh(dev);
12476                 if (!l2_idx)
12477                         return -EINVAL;
12478         }
12479         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12480         if (!new_fltr)
12481                 return -ENOMEM;
12482
12483         fkeys = &new_fltr->fkeys;
12484         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12485                 rc = -EPROTONOSUPPORT;
12486                 goto err_free;
12487         }
12488
12489         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12490              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12491             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12492              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12493                 rc = -EPROTONOSUPPORT;
12494                 goto err_free;
12495         }
12496         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12497             bp->hwrm_spec_code < 0x10601) {
12498                 rc = -EPROTONOSUPPORT;
12499                 goto err_free;
12500         }
12501         flags = fkeys->control.flags;
12502         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12503              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12504                 rc = -EPROTONOSUPPORT;
12505                 goto err_free;
12506         }
12507
12508         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12509         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12510
12511         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12512         head = &bp->ntp_fltr_hash_tbl[idx];
12513         rcu_read_lock();
12514         hlist_for_each_entry_rcu(fltr, head, hash) {
12515                 if (bnxt_fltr_match(fltr, new_fltr)) {
12516                         rcu_read_unlock();
12517                         rc = 0;
12518                         goto err_free;
12519                 }
12520         }
12521         rcu_read_unlock();
12522
12523         spin_lock_bh(&bp->ntp_fltr_lock);
12524         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12525                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12526         if (bit_id < 0) {
12527                 spin_unlock_bh(&bp->ntp_fltr_lock);
12528                 rc = -ENOMEM;
12529                 goto err_free;
12530         }
12531
12532         new_fltr->sw_id = (u16)bit_id;
12533         new_fltr->flow_id = flow_id;
12534         new_fltr->l2_fltr_idx = l2_idx;
12535         new_fltr->rxq = rxq_index;
12536         hlist_add_head_rcu(&new_fltr->hash, head);
12537         bp->ntp_fltr_count++;
12538         spin_unlock_bh(&bp->ntp_fltr_lock);
12539
12540         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12541         bnxt_queue_sp_work(bp);
12542
12543         return new_fltr->sw_id;
12544
12545 err_free:
12546         kfree(new_fltr);
12547         return rc;
12548 }
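/* A note on the aRFS contract (descriptive only): the RFS core calls
 * this via ndo_rx_flow_steer and treats a non-negative return value
 * as the filter ID.  That ID (new_fltr->sw_id) is what
 * bnxt_cfg_ntp_filters() below passes back to rps_may_expire_flow()
 * to decide when the hardware filter can be freed.
 */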
12549
12550 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12551 {
12552         int i;
12553
12554         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12555                 struct hlist_head *head;
12556                 struct hlist_node *tmp;
12557                 struct bnxt_ntuple_filter *fltr;
12558                 int rc;
12559
12560                 head = &bp->ntp_fltr_hash_tbl[i];
12561                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12562                         bool del = false;
12563
12564                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12565                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12566                                                         fltr->flow_id,
12567                                                         fltr->sw_id)) {
12568                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
12569                                                                          fltr);
12570                                         del = true;
12571                                 }
12572                         } else {
12573                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12574                                                                        fltr);
12575                                 if (rc)
12576                                         del = true;
12577                                 else
12578                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
12579                         }
12580
12581                         if (del) {
12582                                 spin_lock_bh(&bp->ntp_fltr_lock);
12583                                 hlist_del_rcu(&fltr->hash);
12584                                 bp->ntp_fltr_count--;
12585                                 spin_unlock_bh(&bp->ntp_fltr_lock);
12586                                 synchronize_rcu();
12587                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12588                                 kfree(fltr);
12589                         }
12590                 }
12591         }
12592         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12593                 netdev_info(bp->dev, "Received PF driver unload event!\n");
12594 }
12595
12596 #else
12597
12598 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12599 {
12600 }
12601
12602 #endif /* CONFIG_RFS_ACCEL */
12603
12604 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12605 {
12606         struct bnxt *bp = netdev_priv(netdev);
12607         struct udp_tunnel_info ti;
12608         unsigned int cmd;
12609
12610         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12611         if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12612                 bp->vxlan_port = ti.port;
12613                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12614         } else {
12615                 bp->nge_port = ti.port;
12616                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12617         }
12618
12619         if (ti.port)
12620                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12621
12622         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12623 }
12624
12625 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12626         .sync_table     = bnxt_udp_tunnel_sync,
12627         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12628                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12629         .tables         = {
12630                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
12631                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12632         },
12633 };
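/* How the tables above are consumed (summary of the udp_tunnel_nic
 * contract, not new driver code): the core invokes sync_table with a
 * table index whenever that table changes, and the driver re-reads
 * entry 0 with udp_tunnel_nic_get_port().  A port of 0 means the slot
 * is empty, which is why bnxt_udp_tunnel_sync() maps ti.port == 0 to
 * a dst-port free instead of an alloc.
 */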
12634
12635 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12636                                struct net_device *dev, u32 filter_mask,
12637                                int nlflags)
12638 {
12639         struct bnxt *bp = netdev_priv(dev);
12640
12641         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12642                                        nlflags, filter_mask, NULL);
12643 }
12644
12645 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12646                                u16 flags, struct netlink_ext_ack *extack)
12647 {
12648         struct bnxt *bp = netdev_priv(dev);
12649         struct nlattr *attr, *br_spec;
12650         int rem, rc = 0;
12651
12652         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12653                 return -EOPNOTSUPP;
12654
12655         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12656         if (!br_spec)
12657                 return -EINVAL;
12658
12659         nla_for_each_nested(attr, br_spec, rem) {
12660                 u16 mode;
12661
12662                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12663                         continue;
12664
12665                 if (nla_len(attr) < sizeof(mode))
12666                         return -EINVAL;
12667
12668                 mode = nla_get_u16(attr);
12669                 if (mode == bp->br_mode)
12670                         break;
12671
12672                 rc = bnxt_hwrm_set_br_mode(bp, mode);
12673                 if (!rc)
12674                         bp->br_mode = mode;
12675                 break;
12676         }
12677         return rc;
12678 }
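/* Example userspace trigger (illustrative): switching the embedded
 * switch bridge mode with iproute2 lands here via ndo_bridge_setlink:
 *
 *	bridge link set dev eth0 hwmode vepa
 *
 * IFLA_BRIDGE_MODE then carries BRIDGE_MODE_VEPA (or BRIDGE_MODE_VEB
 * for "veb"), which is programmed via bnxt_hwrm_set_br_mode().
 */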
12679
12680 int bnxt_get_port_parent_id(struct net_device *dev,
12681                             struct netdev_phys_item_id *ppid)
12682 {
12683         struct bnxt *bp = netdev_priv(dev);
12684
12685         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12686                 return -EOPNOTSUPP;
12687
12688         /* The PF and its VF-reps only support the switchdev framework */
12689         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12690                 return -EOPNOTSUPP;
12691
12692         ppid->id_len = sizeof(bp->dsn);
12693         memcpy(ppid->id, bp->dsn, ppid->id_len);
12694
12695         return 0;
12696 }
12697
12698 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12699 {
12700         struct bnxt *bp = netdev_priv(dev);
12701
12702         return &bp->dl_port;
12703 }
12704
12705 static const struct net_device_ops bnxt_netdev_ops = {
12706         .ndo_open               = bnxt_open,
12707         .ndo_start_xmit         = bnxt_start_xmit,
12708         .ndo_stop               = bnxt_close,
12709         .ndo_get_stats64        = bnxt_get_stats64,
12710         .ndo_set_rx_mode        = bnxt_set_rx_mode,
12711         .ndo_do_ioctl           = bnxt_ioctl,
12712         .ndo_validate_addr      = eth_validate_addr,
12713         .ndo_set_mac_address    = bnxt_change_mac_addr,
12714         .ndo_change_mtu         = bnxt_change_mtu,
12715         .ndo_fix_features       = bnxt_fix_features,
12716         .ndo_set_features       = bnxt_set_features,
12717         .ndo_features_check     = bnxt_features_check,
12718         .ndo_tx_timeout         = bnxt_tx_timeout,
12719 #ifdef CONFIG_BNXT_SRIOV
12720         .ndo_get_vf_config      = bnxt_get_vf_config,
12721         .ndo_set_vf_mac         = bnxt_set_vf_mac,
12722         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
12723         .ndo_set_vf_rate        = bnxt_set_vf_bw,
12724         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
12725         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
12726         .ndo_set_vf_trust       = bnxt_set_vf_trust,
12727 #endif
12728         .ndo_setup_tc           = bnxt_setup_tc,
12729 #ifdef CONFIG_RFS_ACCEL
12730         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
12731 #endif
12732         .ndo_bpf                = bnxt_xdp,
12733         .ndo_xdp_xmit           = bnxt_xdp_xmit,
12734         .ndo_bridge_getlink     = bnxt_bridge_getlink,
12735         .ndo_bridge_setlink     = bnxt_bridge_setlink,
12736         .ndo_get_devlink_port   = bnxt_get_devlink_port,
12737 };
12738
12739 static void bnxt_remove_one(struct pci_dev *pdev)
12740 {
12741         struct net_device *dev = pci_get_drvdata(pdev);
12742         struct bnxt *bp = netdev_priv(dev);
12743
12744         if (BNXT_PF(bp)) {
12745                 bnxt_sriov_disable(bp);
12746                 devlink_port_type_clear(&bp->dl_port);
12747         }
12749
12750         pci_disable_pcie_error_reporting(pdev);
12751         unregister_netdev(dev);
12752         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12753         /* Flush any pending tasks */
12754         cancel_work_sync(&bp->sp_task);
12755         cancel_delayed_work_sync(&bp->fw_reset_task);
12756         bp->sp_event = 0;
12757
12758         bnxt_dl_fw_reporters_destroy(bp, true);
12759         bnxt_dl_unregister(bp);
12760         bnxt_shutdown_tc(bp);
12761
12762         bnxt_clear_int_mode(bp);
12763         bnxt_hwrm_func_drv_unrgtr(bp);
12764         bnxt_free_hwrm_resources(bp);
12765         bnxt_free_hwrm_short_cmd_req(bp);
12766         bnxt_ethtool_free(bp);
12767         bnxt_dcb_free(bp);
12768         kfree(bp->edev);
12769         bp->edev = NULL;
12770         kfree(bp->ptp_cfg);
12771         bp->ptp_cfg = NULL;
12772         kfree(bp->fw_health);
12773         bp->fw_health = NULL;
12774         bnxt_cleanup_pci(bp);
12775         bnxt_free_ctx_mem(bp);
12776         kfree(bp->ctx);
12777         bp->ctx = NULL;
12778         kfree(bp->rss_indir_tbl);
12779         bp->rss_indir_tbl = NULL;
12780         bnxt_free_port_stats(bp);
12781         free_netdev(dev);
12782 }
12783
12784 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12785 {
12786         int rc = 0;
12787         struct bnxt_link_info *link_info = &bp->link_info;
12788
12789         bp->phy_flags = 0;
12790         rc = bnxt_hwrm_phy_qcaps(bp);
12791         if (rc) {
12792                 netdev_err(bp->dev, "Probe PHY can't get PHY capabilities (rc: %d)\n",
12793                            rc);
12794                 return rc;
12795         }
12796         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12797                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12798         else
12799                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12800         if (!fw_dflt)
12801                 return 0;
12802
12803         rc = bnxt_update_link(bp, false);
12804         if (rc) {
12805                 netdev_err(bp->dev, "Probe PHY can't update link (rc: %d)\n",
12806                            rc);
12807                 return rc;
12808         }
12809
12810         /* Older firmware does not have supported_auto_speeds, so assume
12811          * that all supported speeds can be autonegotiated.
12812          */
12813         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12814                 link_info->support_auto_speeds = link_info->support_speeds;
12815
12816         bnxt_init_ethtool_link_settings(bp);
12817         return 0;
12818 }
12819
12820 static int bnxt_get_max_irq(struct pci_dev *pdev)
12821 {
12822         u16 ctrl;
12823
12824         if (!pdev->msix_cap)
12825                 return 1;
12826
12827         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12828         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12829 }
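/* The MSI-X Table Size field is encoded as N - 1 in the PCIe spec,
 * hence the "+ 1" above: a raw QSIZE value of 0x7f, for example,
 * means 128 usable vectors.
 */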
12830
12831 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12832                                 int *max_cp)
12833 {
12834         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12835         int max_ring_grps = 0, max_irq;
12836
12837         *max_tx = hw_resc->max_tx_rings;
12838         *max_rx = hw_resc->max_rx_rings;
12839         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12840         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12841                         bnxt_get_ulp_msix_num(bp),
12842                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12843         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12844                 *max_cp = min_t(int, *max_cp, max_irq);
12845         max_ring_grps = hw_resc->max_hw_ring_grps;
12846         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12847                 *max_cp -= 1;
12848                 *max_rx -= 2;
12849         }
12850         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12851                 *max_rx >>= 1;
12852         if (bp->flags & BNXT_FLAG_CHIP_P5) {
12853                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12854                 /* On P5 chips, max_cp output param is the number of available NQs */
12855                 *max_cp = max_irq;
12856         }
12857         *max_rx = min_t(int, *max_rx, max_ring_grps);
12858 }
12859
12860 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12861 {
12862         int rx, tx, cp;
12863
12864         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12865         *max_rx = rx;
12866         *max_tx = tx;
12867         if (!rx || !tx || !cp)
12868                 return -ENOMEM;
12869
12870         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12871 }
12872
12873 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12874                                bool shared)
12875 {
12876         int rc;
12877
12878         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12879         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12880                 /* Not enough rings, try disabling agg rings. */
12881                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12882                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12883                 if (rc) {
12884                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
12885                         bp->flags |= BNXT_FLAG_AGG_RINGS;
12886                         return rc;
12887                 }
12888                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12889                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12890                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12891                 bnxt_set_ring_params(bp);
12892         }
12893
12894         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12895                 int max_cp, max_stat, max_irq;
12896
12897                 /* Reserve minimum resources for RoCE */
12898                 max_cp = bnxt_get_max_func_cp_rings(bp);
12899                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12900                 max_irq = bnxt_get_max_func_irqs(bp);
12901                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12902                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12903                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12904                         return 0;
12905
12906                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12907                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12908                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12909                 max_cp = min_t(int, max_cp, max_irq);
12910                 max_cp = min_t(int, max_cp, max_stat);
12911                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12912                 if (rc)
12913                         rc = 0;
12914         }
12915         return rc;
12916 }
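/* Worked example for the RoCE reservation above (all values
 * hypothetical): if BNXT_MIN_ROCE_CP_RINGS were 2 and
 * BNXT_MIN_ROCE_STAT_CTXS were 1, then max_cp = 10, max_irq = 10 and
 * max_stat = 5 would trim the L2 rings against
 * min(10 - 2, 10 - 2, 5 - 1) = 4 completion rings, leaving the
 * remainder for RoCE.
 */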
12917
12918 /* In the initial default shared ring setting, each shared ring must have
12919  * an RX/TX ring pair.
12920  */
12921 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12922 {
12923         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12924         bp->rx_nr_rings = bp->cp_nr_rings;
12925         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12926         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12927 }
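/* Worked example (hypothetical numbers): with tx_nr_rings_per_tc = 4
 * and rx_nr_rings = 8, the function above settles on cp_nr_rings =
 * min(4, 8) = 4 and forces rx = tx = 4, so every shared completion
 * ring owns exactly one RX/TX pair.
 */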
12928
12929 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12930 {
12931         int dflt_rings, max_rx_rings, max_tx_rings, rc;
12932
12933         if (!bnxt_can_reserve_rings(bp))
12934                 return 0;
12935
12936         if (sh)
12937                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12938         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12939         /* Reduce default rings on multi-port cards so that the total
12940          * number of default rings does not exceed the CPU count.
12941          */
12942         if (bp->port_count > 1) {
12943                 int max_rings =
12944                         max_t(int, num_online_cpus() / bp->port_count, 1);
12945
12946                 dflt_rings = min_t(int, dflt_rings, max_rings);
12947         }
12948         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12949         if (rc)
12950                 return rc;
12951         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12952         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12953         if (sh)
12954                 bnxt_trim_dflt_sh_rings(bp);
12955         else
12956                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12957         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12958
12959         rc = __bnxt_reserve_rings(bp);
12960         if (rc)
12961                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12962         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12963         if (sh)
12964                 bnxt_trim_dflt_sh_rings(bp);
12965
12966         /* Rings may have been trimmed; re-reserve the trimmed rings. */
12967         if (bnxt_need_reserve_rings(bp)) {
12968                 rc = __bnxt_reserve_rings(bp);
12969                 if (rc)
12970                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12971                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12972         }
12973         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12974                 bp->rx_nr_rings++;
12975                 bp->cp_nr_rings++;
12976         }
12977         if (rc) {
12978                 bp->tx_nr_rings = 0;
12979                 bp->rx_nr_rings = 0;
12980         }
12981         return rc;
12982 }
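/* Worked example for the multi-port cap above (hypothetical): on a
 * 2-port card in an 8-CPU system, max_rings = max(8 / 2, 1) = 4, so
 * dflt_rings is clamped to 4 per port even if
 * netif_get_num_default_rss_queues() suggested more.
 */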
12983
12984 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12985 {
12986         int rc;
12987
12988         if (bp->tx_nr_rings)
12989                 return 0;
12990
12991         bnxt_ulp_irq_stop(bp);
12992         bnxt_clear_int_mode(bp);
12993         rc = bnxt_set_dflt_rings(bp, true);
12994         if (rc) {
12995                 netdev_err(bp->dev, "Not enough rings available.\n");
12996                 goto init_dflt_ring_err;
12997         }
12998         rc = bnxt_init_int_mode(bp);
12999         if (rc)
13000                 goto init_dflt_ring_err;
13001
13002         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13003         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13004                 bp->flags |= BNXT_FLAG_RFS;
13005                 bp->dev->features |= NETIF_F_NTUPLE;
13006         }
13007 init_dflt_ring_err:
13008         bnxt_ulp_irq_restart(bp, rc);
13009         return rc;
13010 }
13011
13012 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13013 {
13014         int rc;
13015
13016         ASSERT_RTNL();
13017         bnxt_hwrm_func_qcaps(bp);
13018
13019         if (netif_running(bp->dev))
13020                 __bnxt_close_nic(bp, true, false);
13021
13022         bnxt_ulp_irq_stop(bp);
13023         bnxt_clear_int_mode(bp);
13024         rc = bnxt_init_int_mode(bp);
13025         bnxt_ulp_irq_restart(bp, rc);
13026
13027         if (netif_running(bp->dev)) {
13028                 if (rc)
13029                         dev_close(bp->dev);
13030                 else
13031                         rc = bnxt_open_nic(bp, true, false);
13032         }
13033
13034         return rc;
13035 }
13036
13037 static int bnxt_init_mac_addr(struct bnxt *bp)
13038 {
13039         int rc = 0;
13040
13041         if (BNXT_PF(bp)) {
13042                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13043         } else {
13044 #ifdef CONFIG_BNXT_SRIOV
13045                 struct bnxt_vf_info *vf = &bp->vf;
13046                 bool strict_approval = true;
13047
13048                 if (is_valid_ether_addr(vf->mac_addr)) {
13049                         /* overwrite netdev dev_addr with admin VF MAC */
13050                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13051                         /* Older PF driver or firmware may not approve this
13052                          * correctly.
13053                          */
13054                         strict_approval = false;
13055                 } else {
13056                         eth_hw_addr_random(bp->dev);
13057                 }
13058                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13059 #endif
13060         }
13061         return rc;
13062 }
13063
13064 #define BNXT_VPD_LEN    512
13065 static void bnxt_vpd_read_info(struct bnxt *bp)
13066 {
13067         struct pci_dev *pdev = bp->pdev;
13068         int i, len, pos, ro_size, size;
13069         ssize_t vpd_size;
13070         u8 *vpd_data;
13071
13072         vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13073         if (!vpd_data)
13074                 return;
13075
13076         vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13077         if (vpd_size <= 0) {
13078                 netdev_err(bp->dev, "Unable to read VPD\n");
13079                 goto exit;
13080         }
13081
13082         i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
13083         if (i < 0) {
13084                 netdev_err(bp->dev, "VPD Read-Only section not found\n");
13085                 goto exit;
13086         }
13087
13088         ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13089         i += PCI_VPD_LRDT_TAG_SIZE;
13090         if (i + ro_size > vpd_size)
13091                 goto exit;
13092
13093         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13094                                         PCI_VPD_RO_KEYWORD_PARTNO);
13095         if (pos < 0)
13096                 goto read_sn;
13097
13098         len = pci_vpd_info_field_size(&vpd_data[pos]);
13099         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13100         if (len + pos > vpd_size)
13101                 goto read_sn;
13102
13103         size = min(len, BNXT_VPD_FLD_LEN - 1);
13104         memcpy(bp->board_partno, &vpd_data[pos], size);
13105
13106 read_sn:
13107         pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13108                                         PCI_VPD_RO_KEYWORD_SERIALNO);
13109         if (pos < 0)
13110                 goto exit;
13111
13112         len = pci_vpd_info_field_size(&vpd_data[pos]);
13113         pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13114         if (len + pos > vpd_size)
13115                 goto exit;
13116
13117         size = min(len, BNXT_VPD_FLD_LEN - 1);
13118         memcpy(bp->board_serialno, &vpd_data[pos], size);
13119 exit:
13120         kfree(vpd_data);
13121 }
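/* Background on the VPD layout parsed above (per the PCI spec): the
 * read-only data lives in a large-resource section found by its LRDT
 * tag; inside it, keyword fields such as "PN" (part number) and "SN"
 * (serial number) each start with a 3-byte header (2-byte keyword
 * plus a 1-byte length), which is what PCI_VPD_INFO_FLD_HDR_SIZE
 * skips.
 */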
13122
13123 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13124 {
13125         struct pci_dev *pdev = bp->pdev;
13126         u64 qword;
13127
13128         qword = pci_get_dsn(pdev);
13129         if (!qword) {
13130                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13131                 return -EOPNOTSUPP;
13132         }
13133
13134         put_unaligned_le64(qword, dsn);
13135
13136         bp->flags |= BNXT_FLAG_DSN_VALID;
13137         return 0;
13138 }
13139
13140 static int bnxt_map_db_bar(struct bnxt *bp)
13141 {
13142         if (!bp->db_size)
13143                 return -ENODEV;
13144         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13145         if (!bp->bar1)
13146                 return -ENOMEM;
13147         return 0;
13148 }
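/* This is the deferred doorbell mapping mentioned in
 * bnxt_init_board(): BAR 2 cannot be mapped until firmware has
 * reported the doorbell region size (bp->db_size) during
 * bnxt_fw_init_one_p2().
 */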
13149
13150 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13151 {
13152         struct net_device *dev;
13153         struct bnxt *bp;
13154         int rc, max_irqs;
13155
13156         if (pci_is_bridge(pdev))
13157                 return -ENODEV;
13158
13159         /* Clear any pending DMA transactions left over from the crash
13160          * kernel while loading the driver in the capture kernel.
13161          */
13162         if (is_kdump_kernel()) {
13163                 pci_clear_master(pdev);
13164                 pcie_flr(pdev);
13165         }
13166
13167         max_irqs = bnxt_get_max_irq(pdev);
13168         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13169         if (!dev)
13170                 return -ENOMEM;
13171
13172         bp = netdev_priv(dev);
13173         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13174         bnxt_set_max_func_irqs(bp, max_irqs);
13175
13176         if (bnxt_vf_pciid(ent->driver_data))
13177                 bp->flags |= BNXT_FLAG_VF;
13178
13179         if (pdev->msix_cap)
13180                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13181
13182         rc = bnxt_init_board(pdev, dev);
13183         if (rc < 0)
13184                 goto init_err_free;
13185
13186         dev->netdev_ops = &bnxt_netdev_ops;
13187         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13188         dev->ethtool_ops = &bnxt_ethtool_ops;
13189         pci_set_drvdata(pdev, dev);
13190
13191         rc = bnxt_alloc_hwrm_resources(bp);
13192         if (rc)
13193                 goto init_err_pci_clean;
13194
13195         mutex_init(&bp->hwrm_cmd_lock);
13196         mutex_init(&bp->link_lock);
13197
13198         rc = bnxt_fw_init_one_p1(bp);
13199         if (rc)
13200                 goto init_err_pci_clean;
13201
13202         if (BNXT_PF(bp))
13203                 bnxt_vpd_read_info(bp);
13204
13205         if (BNXT_CHIP_P5(bp)) {
13206                 bp->flags |= BNXT_FLAG_CHIP_P5;
13207                 if (BNXT_CHIP_SR2(bp))
13208                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13209         }
13210
13211         rc = bnxt_alloc_rss_indir_tbl(bp);
13212         if (rc)
13213                 goto init_err_pci_clean;
13214
13215         rc = bnxt_fw_init_one_p2(bp);
13216         if (rc)
13217                 goto init_err_pci_clean;
13218
13219         rc = bnxt_map_db_bar(bp);
13220         if (rc) {
13221                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13222                         rc);
13223                 goto init_err_pci_clean;
13224         }
13225
13226         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13227                            NETIF_F_TSO | NETIF_F_TSO6 |
13228                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13229                            NETIF_F_GSO_IPXIP4 |
13230                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13231                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13232                            NETIF_F_RXCSUM | NETIF_F_GRO;
13233
13234         if (BNXT_SUPPORTS_TPA(bp))
13235                 dev->hw_features |= NETIF_F_LRO;
13236
13237         dev->hw_enc_features =
13238                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13239                         NETIF_F_TSO | NETIF_F_TSO6 |
13240                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13241                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13242                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13243         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13244
13245         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13246                                     NETIF_F_GSO_GRE_CSUM;
13247         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13248         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13249                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13250         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13251                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13252         if (BNXT_SUPPORTS_TPA(bp))
13253                 dev->hw_features |= NETIF_F_GRO_HW;
13254         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13255         if (dev->features & NETIF_F_GRO_HW)
13256                 dev->features &= ~NETIF_F_LRO;
13257         dev->priv_flags |= IFF_UNICAST_FLT;
13258
13259 #ifdef CONFIG_BNXT_SRIOV
13260         init_waitqueue_head(&bp->sriov_cfg_wait);
13261         mutex_init(&bp->sriov_lock);
13262 #endif
13263         if (BNXT_SUPPORTS_TPA(bp)) {
13264                 bp->gro_func = bnxt_gro_func_5730x;
13265                 if (BNXT_CHIP_P4(bp))
13266                         bp->gro_func = bnxt_gro_func_5731x;
13267                 else if (BNXT_CHIP_P5(bp))
13268                         bp->gro_func = bnxt_gro_func_5750x;
13269         }
13270         if (!BNXT_CHIP_P4_PLUS(bp))
13271                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13272
13273         rc = bnxt_init_mac_addr(bp);
13274         if (rc) {
13275                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13276                 rc = -EADDRNOTAVAIL;
13277                 goto init_err_pci_clean;
13278         }
13279
13280         if (BNXT_PF(bp)) {
13281                 /* Read the adapter's DSN to use as the eswitch switch_id */
13282                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13283         }
13284
13285         /* MTU range: 60 - FW defined max */
13286         dev->min_mtu = ETH_ZLEN;
13287         dev->max_mtu = bp->max_mtu;
13288
13289         rc = bnxt_probe_phy(bp, true);
13290         if (rc)
13291                 goto init_err_pci_clean;
13292
13293         bnxt_set_rx_skb_mode(bp, false);
13294         bnxt_set_tpa_flags(bp);
13295         bnxt_set_ring_params(bp);
13296         rc = bnxt_set_dflt_rings(bp, true);
13297         if (rc) {
13298                 netdev_err(bp->dev, "Not enough rings available.\n");
13299                 rc = -ENOMEM;
13300                 goto init_err_pci_clean;
13301         }
13302
13303         bnxt_fw_init_one_p3(bp);
13304
13305         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13306                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13307
13308         rc = bnxt_init_int_mode(bp);
13309         if (rc)
13310                 goto init_err_pci_clean;
13311
13312         /* No TC has been set yet and rings may have been trimmed due to
13313          * limited MSIX, so we re-initialize the TX rings per TC.
13314          */
13315         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13316
13317         if (BNXT_PF(bp)) {
13318                 if (!bnxt_pf_wq) {
13319                         bnxt_pf_wq =
13320                                 create_singlethread_workqueue("bnxt_pf_wq");
13321                         if (!bnxt_pf_wq) {
13322                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13323                                 rc = -ENOMEM;
13324                                 goto init_err_pci_clean;
13325                         }
13326                 }
13327                 rc = bnxt_init_tc(bp);
13328                 if (rc)
13329                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13330                                    rc);
13331         }
13332
13333         bnxt_inv_fw_health_reg(bp);
13334         bnxt_dl_register(bp);
13335
13336         rc = register_netdev(dev);
13337         if (rc)
13338                 goto init_err_cleanup;
13339
13340         if (BNXT_PF(bp))
13341                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13342         bnxt_dl_fw_reporters_create(bp);
13343
13344         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13345                     board_info[ent->driver_data].name,
13346                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
13347         pcie_print_link_status(pdev);
13348
13349         pci_save_state(pdev);
13350         return 0;
13351
13352 init_err_cleanup:
13353         bnxt_dl_unregister(bp);
13354         bnxt_shutdown_tc(bp);
13355         bnxt_clear_int_mode(bp);
13356
13357 init_err_pci_clean:
13358         bnxt_hwrm_func_drv_unrgtr(bp);
13359         bnxt_free_hwrm_short_cmd_req(bp);
13360         bnxt_free_hwrm_resources(bp);
13361         bnxt_ethtool_free(bp);
13362         kfree(bp->ptp_cfg);
13363         bp->ptp_cfg = NULL;
13364         kfree(bp->fw_health);
13365         bp->fw_health = NULL;
13366         bnxt_cleanup_pci(bp);
13367         bnxt_free_ctx_mem(bp);
13368         kfree(bp->ctx);
13369         bp->ctx = NULL;
13370         kfree(bp->rss_indir_tbl);
13371         bp->rss_indir_tbl = NULL;
13372
13373 init_err_free:
13374         free_netdev(dev);
13375         return rc;
13376 }
13377
13378 static void bnxt_shutdown(struct pci_dev *pdev)
13379 {
13380         struct net_device *dev = pci_get_drvdata(pdev);
13381         struct bnxt *bp;
13382
13383         if (!dev)
13384                 return;
13385
13386         rtnl_lock();
13387         bp = netdev_priv(dev);
13388         if (!bp)
13389                 goto shutdown_exit;
13390
13391         if (netif_running(dev))
13392                 dev_close(dev);
13393
13394         bnxt_ulp_shutdown(bp);
13395         bnxt_clear_int_mode(bp);
13396         pci_disable_device(pdev);
13397
13398         if (system_state == SYSTEM_POWER_OFF) {
13399                 pci_wake_from_d3(pdev, bp->wol);
13400                 pci_set_power_state(pdev, PCI_D3hot);
13401         }
13402
13403 shutdown_exit:
13404         rtnl_unlock();
13405 }
13406
13407 #ifdef CONFIG_PM_SLEEP
13408 static int bnxt_suspend(struct device *device)
13409 {
13410         struct net_device *dev = dev_get_drvdata(device);
13411         struct bnxt *bp = netdev_priv(dev);
13412         int rc = 0;
13413
13414         rtnl_lock();
13415         bnxt_ulp_stop(bp);
13416         if (netif_running(dev)) {
13417                 netif_device_detach(dev);
13418                 rc = bnxt_close(dev);
13419         }
13420         bnxt_hwrm_func_drv_unrgtr(bp);
13421         pci_disable_device(bp->pdev);
13422         bnxt_free_ctx_mem(bp);
13423         kfree(bp->ctx);
13424         bp->ctx = NULL;
13425         rtnl_unlock();
13426         return rc;
13427 }
13428
13429 static int bnxt_resume(struct device *device)
13430 {
13431         struct net_device *dev = dev_get_drvdata(device);
13432         struct bnxt *bp = netdev_priv(dev);
13433         int rc = 0;
13434
13435         rtnl_lock();
13436         rc = pci_enable_device(bp->pdev);
13437         if (rc) {
13438                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13439                            rc);
13440                 goto resume_exit;
13441         }
13442         pci_set_master(bp->pdev);
13443         if (bnxt_hwrm_ver_get(bp)) {
13444                 rc = -ENODEV;
13445                 goto resume_exit;
13446         }
13447         rc = bnxt_hwrm_func_reset(bp);
13448         if (rc) {
13449                 rc = -EBUSY;
13450                 goto resume_exit;
13451         }
13452
13453         rc = bnxt_hwrm_func_qcaps(bp);
13454         if (rc)
13455                 goto resume_exit;
13456
13457         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13458                 rc = -ENODEV;
13459                 goto resume_exit;
13460         }
13461
13462         bnxt_get_wol_settings(bp);
13463         if (netif_running(dev)) {
13464                 rc = bnxt_open(dev);
13465                 if (!rc)
13466                         netif_device_attach(dev);
13467         }
13468
13469 resume_exit:
13470         bnxt_ulp_start(bp, rc);
13471         if (!rc)
13472                 bnxt_reenable_sriov(bp);
13473         rtnl_unlock();
13474         return rc;
13475 }
13476
13477 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13478 #define BNXT_PM_OPS (&bnxt_pm_ops)
13479
13480 #else
13481
13482 #define BNXT_PM_OPS NULL
13483
13484 #endif /* CONFIG_PM_SLEEP */
13485
13486 /**
13487  * bnxt_io_error_detected - called when PCI error is detected
13488  * @pdev: Pointer to PCI device
13489  * @state: The current pci connection state
13490  *
13491  * This function is called after a PCI bus error affecting
13492  * this device has been detected.
13493  */
13494 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13495                                                pci_channel_state_t state)
13496 {
13497         struct net_device *netdev = pci_get_drvdata(pdev);
13498         struct bnxt *bp = netdev_priv(netdev);
13499
13500         netdev_info(netdev, "PCI I/O error detected\n");
13501
13502         rtnl_lock();
13503         netif_device_detach(netdev);
13504
13505         bnxt_ulp_stop(bp);
13506
13507         if (state == pci_channel_io_perm_failure) {
13508                 rtnl_unlock();
13509                 return PCI_ERS_RESULT_DISCONNECT;
13510         }
13511
13512         if (state == pci_channel_io_frozen)
13513                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13514
13515         if (netif_running(netdev))
13516                 bnxt_close(netdev);
13517
13518         if (pci_is_enabled(pdev))
13519                 pci_disable_device(pdev);
13520         bnxt_free_ctx_mem(bp);
13521         kfree(bp->ctx);
13522         bp->ctx = NULL;
13523         rtnl_unlock();
13524
13525         /* Request a slot reset. */
13526         return PCI_ERS_RESULT_NEED_RESET;
13527 }
13528
13529 /**
13530  * bnxt_io_slot_reset - called after the pci bus has been reset.
13531  * @pdev: Pointer to PCI device
13532  *
13533  * Restart the card from scratch, as if from a cold-boot.
13534  * At this point, the card has experienced a hard reset,
13535  * followed by fixups by BIOS, and has its config space
13536  * set up identically to what it was at cold boot.
13537  */
13538 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13539 {
13540         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13541         struct net_device *netdev = pci_get_drvdata(pdev);
13542         struct bnxt *bp = netdev_priv(netdev);
13543         int err = 0, off;
13544
13545         netdev_info(bp->dev, "PCI Slot Reset\n");
13546
13547         rtnl_lock();
13548
13549         if (pci_enable_device(pdev)) {
13550                 dev_err(&pdev->dev,
13551                         "Cannot re-enable PCI device after reset.\n");
13552         } else {
13553                 pci_set_master(pdev);
13554                 /* Upon fatal error, the device's internal logic that
13555                  * latches the BAR values is reset and is restored only by
13556                  * rewriting the BARs.
13557                  *
13558                  * Since pci_restore_state() does not re-write a BAR whose
13559                  * value matches the saved one, the driver writes the BARs
13560                  * to 0 to force a restore after a fatal error.
13561                  */
13562                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13563                                        &bp->state)) {
13564                         for (off = PCI_BASE_ADDRESS_0;
13565                              off <= PCI_BASE_ADDRESS_5; off += 4)
13566                                 pci_write_config_dword(bp->pdev, off, 0);
13567                 }
13568                 pci_restore_state(pdev);
13569                 pci_save_state(pdev);
13570
13571                 err = bnxt_hwrm_func_reset(bp);
13572                 if (!err)
13573                         result = PCI_ERS_RESULT_RECOVERED;
13574         }
13575
13576         rtnl_unlock();
13577
13578         return result;
13579 }
13580
13581 /**
13582  * bnxt_io_resume - called when traffic can start flowing again.
13583  * @pdev: Pointer to PCI device
13584  *
13585  * This callback is called when the error recovery driver tells
13586  * us that it's OK to resume normal operation.
13587  */
13588 static void bnxt_io_resume(struct pci_dev *pdev)
13589 {
13590         struct net_device *netdev = pci_get_drvdata(pdev);
13591         struct bnxt *bp = netdev_priv(netdev);
13592         int err;
13593
13594         netdev_info(bp->dev, "PCI Slot Resume\n");
13595         rtnl_lock();
13596
13597         err = bnxt_hwrm_func_qcaps(bp);
13598         if (!err && netif_running(netdev))
13599                 err = bnxt_open(netdev);
13600
13601         bnxt_ulp_start(bp, err);
13602         if (!err) {
13603                 bnxt_reenable_sriov(bp);
13604                 netif_device_attach(netdev);
13605         }
13606
13607         rtnl_unlock();
13608 }
13609
13610 static const struct pci_error_handlers bnxt_err_handler = {
13611         .error_detected = bnxt_io_error_detected,
13612         .slot_reset     = bnxt_io_slot_reset,
13613         .resume         = bnxt_io_resume
13614 };
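/* Recovery sequence for the handlers above (standard PCI AER flow):
 * the PCI core calls .error_detected first; a
 * PCI_ERS_RESULT_NEED_RESET return leads to a slot reset and a
 * .slot_reset callback, and only a PCI_ERS_RESULT_RECOVERED result
 * from that lets .resume restart traffic.
 */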
13615
13616 static struct pci_driver bnxt_pci_driver = {
13617         .name           = DRV_MODULE_NAME,
13618         .id_table       = bnxt_pci_tbl,
13619         .probe          = bnxt_init_one,
13620         .remove         = bnxt_remove_one,
13621         .shutdown       = bnxt_shutdown,
13622         .driver.pm      = BNXT_PM_OPS,
13623         .err_handler    = &bnxt_err_handler,
13624 #if defined(CONFIG_BNXT_SRIOV)
13625         .sriov_configure = bnxt_sriov_configure,
13626 #endif
13627 };
13628
13629 static int __init bnxt_init(void)
13630 {
13631         bnxt_debug_init();
13632         return pci_register_driver(&bnxt_pci_driver);
13633 }
13634
13635 static void __exit bnxt_exit(void)
13636 {
13637         pci_unregister_driver(&bnxt_pci_driver);
13638         if (bnxt_pf_wq)
13639                 destroy_workqueue(bnxt_pf_wq);
13640         bnxt_debug_exit();
13641 }
13642
13643 module_init(bnxt_init);
13644 module_exit(bnxt_exit);