linux-2.6-microblaze.git: drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/bitmap.h>
53 #include <linux/cpu_rmap.h>
54 #include <linux/cpumask.h>
55 #include <net/pkt_cls.h>
56 #include <linux/hwmon.h>
57 #include <linux/hwmon-sysfs.h>
58 #include <net/page_pool.h>
59
60 #include "bnxt_hsi.h"
61 #include "bnxt.h"
62 #include "bnxt_hwrm.h"
63 #include "bnxt_ulp.h"
64 #include "bnxt_sriov.h"
65 #include "bnxt_ethtool.h"
66 #include "bnxt_dcb.h"
67 #include "bnxt_xdp.h"
68 #include "bnxt_ptp.h"
69 #include "bnxt_vfr.h"
70 #include "bnxt_tc.h"
71 #include "bnxt_devlink.h"
72 #include "bnxt_debugfs.h"
73
74 #define BNXT_TX_TIMEOUT         (5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
76                                  NETIF_MSG_TX_ERR)
77
78 MODULE_LICENSE("GPL");
79 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
80
81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
83 #define BNXT_RX_COPY_THRESH 256
84
85 #define BNXT_TX_PUSH_THRESH 164
86
87 /* indexed by enum board_idx */
88 static const struct {
89         char *name;
90 } board_info[] = {
91         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122         [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123         [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
124         [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
125         [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
126         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
127         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
128         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
129         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
130         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
131         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
132         [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
133         [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
134         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
135         [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
136 };
137
138 static const struct pci_device_id bnxt_pci_tbl[] = {
139         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
140         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
141         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
142         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
143         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
144         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
145         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
146         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
147         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
148         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
149         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
150         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
151         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
152         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
153         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
154         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
155         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
156         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
157         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
158         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
159         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
160         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
161         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
162         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
163         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
164         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
165         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
166         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
167         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
168         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
169         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
170         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
171         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
172         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
173         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
174         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
175         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
176         { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
177         { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
178         { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
179         { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
180         { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
181         { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
182         { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
183         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
184         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
185 #ifdef CONFIG_BNXT_SRIOV
186         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
187         { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
188         { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
189         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
190         { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
191         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
192         { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
193         { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
194         { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
195         { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
196         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
197         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
198         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
199         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
200         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
201         { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
202         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
203         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
204         { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
205         { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
206         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
207 #endif
208         { 0 }
209 };
210
211 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
212
213 static const u16 bnxt_vf_req_snif[] = {
214         HWRM_FUNC_CFG,
215         HWRM_FUNC_VF_CFG,
216         HWRM_PORT_PHY_QCFG,
217         HWRM_CFA_L2_FILTER_ALLOC,
218 };
219
220 static const u16 bnxt_async_events_arr[] = {
221         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
222         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
223         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
224         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
225         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
226         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
227         ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
228         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
229         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
230         ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
231         ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
232         ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
233         ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
234         ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
235         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
236         ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
237 };
238
239 static struct workqueue_struct *bnxt_pf_wq;
240
241 static bool bnxt_vf_pciid(enum board_idx idx)
242 {
243         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
244                 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
245                 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
246                 idx == NETXTREME_E_P5_VF_HV);
247 }
248
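/* Doorbell helpers (descriptive note): legacy (pre-P5) chips take a 32-bit
 * completion-ring doorbell write via writel(), while P5 chips take a 64-bit
 * NQ/CQ doorbell via bnxt_writeq().  The *_ARM variants re-arm the ring so
 * that a new interrupt/notification can be generated.
 */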
249 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
250 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
251 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
252
253 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
254                 writel(DB_CP_IRQ_DIS_FLAGS, db)
255
256 #define BNXT_DB_CQ(db, idx)                                             \
257         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
258
259 #define BNXT_DB_NQ_P5(db, idx)                                          \
260         bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),   \
261                     (db)->doorbell)
262
263 #define BNXT_DB_CQ_ARM(db, idx)                                         \
264         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
265
266 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
267         bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
268                     (db)->doorbell)
269
270 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
271 {
272         if (bp->flags & BNXT_FLAG_CHIP_P5)
273                 BNXT_DB_NQ_P5(db, idx);
274         else
275                 BNXT_DB_CQ(db, idx);
276 }
277
278 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
279 {
280         if (bp->flags & BNXT_FLAG_CHIP_P5)
281                 BNXT_DB_NQ_ARM_P5(db, idx);
282         else
283                 BNXT_DB_CQ_ARM(db, idx);
284 }
285
286 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
287 {
288         if (bp->flags & BNXT_FLAG_CHIP_P5)
289                 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
290                             RING_CMP(idx), db->doorbell);
291         else
292                 BNXT_DB_CQ(db, idx);
293 }
294
295 const u16 bnxt_lhint_arr[] = {
296         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
297         TX_BD_FLAGS_LHINT_512_TO_1023,
298         TX_BD_FLAGS_LHINT_1024_TO_2047,
299         TX_BD_FLAGS_LHINT_1024_TO_2047,
300         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
301         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
302         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
303         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
304         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
305         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
306         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
307         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
308         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
309         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
310         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
311         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
312         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
313         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
314         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
315 };
316
317 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
318 {
319         struct metadata_dst *md_dst = skb_metadata_dst(skb);
320
321         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
322                 return 0;
323
324         return md_dst->u.port_info.port_id;
325 }
326
327 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
328                              u16 prod)
329 {
330         bnxt_db_write(bp, &txr->tx_db, prod);
331         txr->kick_pending = 0;
332 }
333
334 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
335                                           struct bnxt_tx_ring_info *txr,
336                                           struct netdev_queue *txq)
337 {
338         netif_tx_stop_queue(txq);
339
340         /* netif_tx_stop_queue() must be done before checking
341          * tx index in bnxt_tx_avail() below, because in
342          * bnxt_tx_int(), we update tx index before checking for
343          * netif_tx_queue_stopped().
344          */
345         smp_mb();
346         if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
347                 netif_tx_wake_queue(txq);
348                 return false;
349         }
350
351         return true;
352 }
353
354 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
355 {
356         struct bnxt *bp = netdev_priv(dev);
357         struct tx_bd *txbd;
358         struct tx_bd_ext *txbd1;
359         struct netdev_queue *txq;
360         int i;
361         dma_addr_t mapping;
362         unsigned int length, pad = 0;
363         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
364         u16 prod, last_frag;
365         struct pci_dev *pdev = bp->pdev;
366         struct bnxt_tx_ring_info *txr;
367         struct bnxt_sw_tx_bd *tx_buf;
368         __le32 lflags = 0;
369
370         i = skb_get_queue_mapping(skb);
371         if (unlikely(i >= bp->tx_nr_rings)) {
372                 dev_kfree_skb_any(skb);
373                 dev_core_stats_tx_dropped_inc(dev);
374                 return NETDEV_TX_OK;
375         }
376
377         txq = netdev_get_tx_queue(dev, i);
378         txr = &bp->tx_ring[bp->tx_ring_map[i]];
379         prod = txr->tx_prod;
380
381         free_size = bnxt_tx_avail(bp, txr);
382         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
383                 /* We must have raced with NAPI cleanup */
384                 if (net_ratelimit() && txr->kick_pending)
385                         netif_warn(bp, tx_err, dev,
386                                    "bnxt: ring busy w/ flush pending!\n");
387                 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
388                         return NETDEV_TX_BUSY;
389         }
390
391         length = skb->len;
392         len = skb_headlen(skb);
393         last_frag = skb_shinfo(skb)->nr_frags;
394
395         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
396
397         txbd->tx_bd_opaque = prod;
398
399         tx_buf = &txr->tx_buf_ring[prod];
400         tx_buf->skb = skb;
401         tx_buf->nr_frags = last_frag;
402
403         vlan_tag_flags = 0;
404         cfa_action = bnxt_xmit_get_cfa_action(skb);
405         if (skb_vlan_tag_present(skb)) {
406                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
407                                  skb_vlan_tag_get(skb);
408                 /* Currently supports 802.1Q and 802.1AD VLAN offloads;
409                  * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
410                  */
411                 if (skb->vlan_proto == htons(ETH_P_8021Q))
412                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
413         }
414
415         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
416                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
417
418                 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
419                     atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
420                         if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
421                                             &ptp->tx_hdr_off)) {
422                                 if (vlan_tag_flags)
423                                         ptp->tx_hdr_off += VLAN_HLEN;
424                                 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
425                                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
426                         } else {
427                                 atomic_inc(&bp->ptp_cfg->tx_avail);
428                         }
429                 }
430         }
431
432         if (unlikely(skb->no_fcs))
433                 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
434
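        /* Inline "push" TX: if the ring is completely empty, the packet fits
         * under tx_push_thresh and needs no special lflags, the BDs and the
         * packet data are copied straight through the doorbell BAR below
         * instead of DMA-mapping the skb.
         */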
435         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
436             !lflags) {
437                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
438                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
439                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
440                 void __iomem *db = txr->tx_db.doorbell;
441                 void *pdata = tx_push_buf->data;
442                 u64 *end;
443                 int j, push_len;
444
445                 /* Set COAL_NOW to be ready quickly for the next push */
446                 tx_push->tx_bd_len_flags_type =
447                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
448                                         TX_BD_TYPE_LONG_TX_BD |
449                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
450                                         TX_BD_FLAGS_COAL_NOW |
451                                         TX_BD_FLAGS_PACKET_END |
452                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
453
454                 if (skb->ip_summed == CHECKSUM_PARTIAL)
455                         tx_push1->tx_bd_hsize_lflags =
456                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
457                 else
458                         tx_push1->tx_bd_hsize_lflags = 0;
459
460                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
461                 tx_push1->tx_bd_cfa_action =
462                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
463
464                 end = pdata + length;
465                 end = PTR_ALIGN(end, 8) - 1;
466                 *end = 0;
467
468                 skb_copy_from_linear_data(skb, pdata, len);
469                 pdata += len;
470                 for (j = 0; j < last_frag; j++) {
471                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
472                         void *fptr;
473
474                         fptr = skb_frag_address_safe(frag);
475                         if (!fptr)
476                                 goto normal_tx;
477
478                         memcpy(pdata, fptr, skb_frag_size(frag));
479                         pdata += skb_frag_size(frag);
480                 }
481
482                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
483                 txbd->tx_bd_haddr = txr->data_mapping;
484                 prod = NEXT_TX(prod);
485                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
486                 memcpy(txbd, tx_push1, sizeof(*txbd));
487                 prod = NEXT_TX(prod);
488                 tx_push->doorbell =
489                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
490                 txr->tx_prod = prod;
491
492                 tx_buf->is_push = 1;
493                 netdev_tx_sent_queue(txq, skb->len);
494                 wmb();  /* Sync is_push and byte queue before pushing data */
495
496                 push_len = (length + sizeof(*tx_push) + 7) / 8;
497                 if (push_len > 16) {
498                         __iowrite64_copy(db, tx_push_buf, 16);
499                         __iowrite32_copy(db + 4, tx_push_buf + 1,
500                                          (push_len - 16) << 1);
501                 } else {
502                         __iowrite64_copy(db, tx_push_buf, push_len);
503                 }
504
505                 goto tx_done;
506         }
507
508 normal_tx:
509         if (length < BNXT_MIN_PKT_SIZE) {
510                 pad = BNXT_MIN_PKT_SIZE - length;
511                 if (skb_pad(skb, pad))
512                         /* SKB already freed. */
513                         goto tx_kick_pending;
514                 length = BNXT_MIN_PKT_SIZE;
515         }
516
517         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
518
519         if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
520                 goto tx_free;
521
522         dma_unmap_addr_set(tx_buf, mapping, mapping);
523         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
524                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
525
526         txbd->tx_bd_haddr = cpu_to_le64(mapping);
527
528         prod = NEXT_TX(prod);
529         txbd1 = (struct tx_bd_ext *)
530                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
531
532         txbd1->tx_bd_hsize_lflags = lflags;
533         if (skb_is_gso(skb)) {
534                 u32 hdr_len;
535
536                 if (skb->encapsulation)
537                         hdr_len = skb_inner_network_offset(skb) +
538                                 skb_inner_network_header_len(skb) +
539                                 inner_tcp_hdrlen(skb);
540                 else
541                         hdr_len = skb_transport_offset(skb) +
542                                 tcp_hdrlen(skb);
543
544                 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
545                                         TX_BD_FLAGS_T_IPID |
546                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
547                 length = skb_shinfo(skb)->gso_size;
548                 txbd1->tx_bd_mss = cpu_to_le32(length);
549                 length += hdr_len;
550         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
551                 txbd1->tx_bd_hsize_lflags |=
552                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
553                 txbd1->tx_bd_mss = 0;
554         }
555
556         length >>= 9;
557         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
558                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
559                                      skb->len);
560                 i = 0;
561                 goto tx_dma_error;
562         }
563         flags |= bnxt_lhint_arr[length];
564         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
565
566         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
567         txbd1->tx_bd_cfa_action =
568                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
569         for (i = 0; i < last_frag; i++) {
570                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
571
572                 prod = NEXT_TX(prod);
573                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
574
575                 len = skb_frag_size(frag);
576                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
577                                            DMA_TO_DEVICE);
578
579                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
580                         goto tx_dma_error;
581
582                 tx_buf = &txr->tx_buf_ring[prod];
583                 dma_unmap_addr_set(tx_buf, mapping, mapping);
584
585                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
586
587                 flags = len << TX_BD_LEN_SHIFT;
588                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
589         }
590
591         flags &= ~TX_BD_LEN;
592         txbd->tx_bd_len_flags_type =
593                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
594                             TX_BD_FLAGS_PACKET_END);
595
596         netdev_tx_sent_queue(txq, skb->len);
597
598         skb_tx_timestamp(skb);
599
600         /* Sync BD data before updating doorbell */
601         wmb();
602
603         prod = NEXT_TX(prod);
604         txr->tx_prod = prod;
605
606         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
607                 bnxt_txr_db_kick(bp, txr, prod);
608         else
609                 txr->kick_pending = 1;
610
611 tx_done:
612
613         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
614                 if (netdev_xmit_more() && !tx_buf->is_push)
615                         bnxt_txr_db_kick(bp, txr, prod);
616
617                 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
618         }
619         return NETDEV_TX_OK;
620
621 tx_dma_error:
622         if (BNXT_TX_PTP_IS_SET(lflags))
623                 atomic_inc(&bp->ptp_cfg->tx_avail);
624
625         last_frag = i;
626
627         /* start back at beginning and unmap skb */
628         prod = txr->tx_prod;
629         tx_buf = &txr->tx_buf_ring[prod];
630         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
631                          skb_headlen(skb), DMA_TO_DEVICE);
632         prod = NEXT_TX(prod);
633
634         /* unmap remaining mapped pages */
635         for (i = 0; i < last_frag; i++) {
636                 prod = NEXT_TX(prod);
637                 tx_buf = &txr->tx_buf_ring[prod];
638                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
639                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
640                                DMA_TO_DEVICE);
641         }
642
643 tx_free:
644         dev_kfree_skb_any(skb);
645 tx_kick_pending:
646         if (txr->kick_pending)
647                 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
648         txr->tx_buf_ring[txr->tx_prod].skb = NULL;
649         dev_core_stats_tx_dropped_inc(dev);
650         return NETDEV_TX_OK;
651 }
652
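/* TX completion: reclaim up to @nr_pkts completed packets -- unmap their
 * buffers, free the skbs (unless a PTP TX timestamp is still pending on P5
 * chips), and wake the queue once enough descriptors are free again.
 */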
653 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
654 {
655         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
656         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
657         u16 cons = txr->tx_cons;
658         struct pci_dev *pdev = bp->pdev;
659         int i;
660         unsigned int tx_bytes = 0;
661
662         for (i = 0; i < nr_pkts; i++) {
663                 struct bnxt_sw_tx_bd *tx_buf;
664                 bool compl_deferred = false;
665                 struct sk_buff *skb;
666                 int j, last;
667
668                 tx_buf = &txr->tx_buf_ring[cons];
669                 cons = NEXT_TX(cons);
670                 skb = tx_buf->skb;
671                 tx_buf->skb = NULL;
672
673                 if (tx_buf->is_push) {
674                         tx_buf->is_push = 0;
675                         goto next_tx_int;
676                 }
677
678                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
679                                  skb_headlen(skb), DMA_TO_DEVICE);
680                 last = tx_buf->nr_frags;
681
682                 for (j = 0; j < last; j++) {
683                         cons = NEXT_TX(cons);
684                         tx_buf = &txr->tx_buf_ring[cons];
685                         dma_unmap_page(
686                                 &pdev->dev,
687                                 dma_unmap_addr(tx_buf, mapping),
688                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
689                                 DMA_TO_DEVICE);
690                 }
691                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
692                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
693                                 if (!bnxt_get_tx_ts_p5(bp, skb))
694                                         compl_deferred = true;
695                                 else
696                                         atomic_inc(&bp->ptp_cfg->tx_avail);
697                         }
698                 }
699
700 next_tx_int:
701                 cons = NEXT_TX(cons);
702
703                 tx_bytes += skb->len;
704                 if (!compl_deferred)
705                         dev_kfree_skb_any(skb);
706         }
707
708         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
709         txr->tx_cons = cons;
710
711         /* Need to make the tx_cons update visible to bnxt_start_xmit()
712          * before checking for netif_tx_queue_stopped().  Without the
713          * memory barrier, there is a small possibility that bnxt_start_xmit()
714          * will miss it and cause the queue to be stopped forever.
715          */
716         smp_mb();
717
718         if (unlikely(netif_tx_queue_stopped(txq)) &&
719             bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
720             READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
721                 netif_tx_wake_queue(txq);
722 }
723
724 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
725                                          struct bnxt_rx_ring_info *rxr,
726                                          gfp_t gfp)
727 {
728         struct device *dev = &bp->pdev->dev;
729         struct page *page;
730
731         page = page_pool_dev_alloc_pages(rxr->page_pool);
732         if (!page)
733                 return NULL;
734
735         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
736                                       DMA_ATTR_WEAK_ORDERING);
737         if (dma_mapping_error(dev, *mapping)) {
738                 page_pool_recycle_direct(rxr->page_pool, page);
739                 return NULL;
740         }
741         *mapping += bp->rx_dma_offset;
742         return page;
743 }
744
745 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
746                                        gfp_t gfp)
747 {
748         u8 *data;
749         struct pci_dev *pdev = bp->pdev;
750
751         if (gfp == GFP_ATOMIC)
752                 data = napi_alloc_frag(bp->rx_buf_size);
753         else
754                 data = netdev_alloc_frag(bp->rx_buf_size);
755         if (!data)
756                 return NULL;
757
758         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
759                                         bp->rx_buf_use_size, bp->rx_dir,
760                                         DMA_ATTR_WEAK_ORDERING);
761
762         if (dma_mapping_error(&pdev->dev, *mapping)) {
763                 skb_free_frag(data);
764                 data = NULL;
765         }
766         return data;
767 }
768
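/* Allocate and DMA-map one RX buffer for ring slot @prod: a full page when
 * the ring is in page mode, otherwise a page-frag buffer, and program its
 * DMA address into the RX BD.
 */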
769 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
770                        u16 prod, gfp_t gfp)
771 {
772         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
773         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
774         dma_addr_t mapping;
775
776         if (BNXT_RX_PAGE_MODE(bp)) {
777                 struct page *page =
778                         __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
779
780                 if (!page)
781                         return -ENOMEM;
782
783                 rx_buf->data = page;
784                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
785         } else {
786                 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
787
788                 if (!data)
789                         return -ENOMEM;
790
791                 rx_buf->data = data;
792                 rx_buf->data_ptr = data + bp->rx_offset;
793         }
794         rx_buf->mapping = mapping;
795
796         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
797         return 0;
798 }
799
800 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
801 {
802         u16 prod = rxr->rx_prod;
803         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
804         struct rx_bd *cons_bd, *prod_bd;
805
806         prod_rx_buf = &rxr->rx_buf_ring[prod];
807         cons_rx_buf = &rxr->rx_buf_ring[cons];
808
809         prod_rx_buf->data = data;
810         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
811
812         prod_rx_buf->mapping = cons_rx_buf->mapping;
813
814         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
815         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
816
817         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
818 }
819
820 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
821 {
822         u16 next, max = rxr->rx_agg_bmap_size;
823
824         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
825         if (next >= max)
826                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
827         return next;
828 }
829
830 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
831                                      struct bnxt_rx_ring_info *rxr,
832                                      u16 prod, gfp_t gfp)
833 {
834         struct rx_bd *rxbd =
835                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
836         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
837         struct pci_dev *pdev = bp->pdev;
838         struct page *page;
839         dma_addr_t mapping;
840         u16 sw_prod = rxr->rx_sw_agg_prod;
841         unsigned int offset = 0;
842
843         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
844                 page = rxr->rx_page;
845                 if (!page) {
846                         page = alloc_page(gfp);
847                         if (!page)
848                                 return -ENOMEM;
849                         rxr->rx_page = page;
850                         rxr->rx_page_offset = 0;
851                 }
852                 offset = rxr->rx_page_offset;
853                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
854                 if (rxr->rx_page_offset == PAGE_SIZE)
855                         rxr->rx_page = NULL;
856                 else
857                         get_page(page);
858         } else {
859                 page = alloc_page(gfp);
860                 if (!page)
861                         return -ENOMEM;
862         }
863
864         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
865                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
866                                      DMA_ATTR_WEAK_ORDERING);
867         if (dma_mapping_error(&pdev->dev, mapping)) {
868                 __free_page(page);
869                 return -EIO;
870         }
871
872         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
873                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
874
875         __set_bit(sw_prod, rxr->rx_agg_bmap);
876         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
877         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
878
879         rx_agg_buf->page = page;
880         rx_agg_buf->offset = offset;
881         rx_agg_buf->mapping = mapping;
882         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
883         rxbd->rx_bd_opaque = sw_prod;
884         return 0;
885 }
886
887 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
888                                        struct bnxt_cp_ring_info *cpr,
889                                        u16 cp_cons, u16 curr)
890 {
891         struct rx_agg_cmp *agg;
892
893         cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
894         agg = (struct rx_agg_cmp *)
895                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
896         return agg;
897 }
898
899 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
900                                               struct bnxt_rx_ring_info *rxr,
901                                               u16 agg_id, u16 curr)
902 {
903         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
904
905         return &tpa_info->agg_arr[curr];
906 }
907
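/* Recycle @agg_bufs aggregation buffers (taken from the completion ring, or
 * from the TPA agg array on P5 chips) back onto the RX aggregation ring,
 * e.g. after a discarded or errored packet.
 */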
908 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
909                                    u16 start, u32 agg_bufs, bool tpa)
910 {
911         struct bnxt_napi *bnapi = cpr->bnapi;
912         struct bnxt *bp = bnapi->bp;
913         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
914         u16 prod = rxr->rx_agg_prod;
915         u16 sw_prod = rxr->rx_sw_agg_prod;
916         bool p5_tpa = false;
917         u32 i;
918
919         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
920                 p5_tpa = true;
921
922         for (i = 0; i < agg_bufs; i++) {
923                 u16 cons;
924                 struct rx_agg_cmp *agg;
925                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
926                 struct rx_bd *prod_bd;
927                 struct page *page;
928
929                 if (p5_tpa)
930                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
931                 else
932                         agg = bnxt_get_agg(bp, cpr, idx, start + i);
933                 cons = agg->rx_agg_cmp_opaque;
934                 __clear_bit(cons, rxr->rx_agg_bmap);
935
936                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
937                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
938
939                 __set_bit(sw_prod, rxr->rx_agg_bmap);
940                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
941                 cons_rx_buf = &rxr->rx_agg_ring[cons];
942
943                 /* It is possible for sw_prod to be equal to cons, so
944                  * set cons_rx_buf->page to NULL first.
945                  */
946                 page = cons_rx_buf->page;
947                 cons_rx_buf->page = NULL;
948                 prod_rx_buf->page = page;
949                 prod_rx_buf->offset = cons_rx_buf->offset;
950
951                 prod_rx_buf->mapping = cons_rx_buf->mapping;
952
953                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
954
955                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
956                 prod_bd->rx_bd_opaque = sw_prod;
957
958                 prod = NEXT_RX_AGG(prod);
959                 sw_prod = NEXT_RX_AGG(sw_prod);
960         }
961         rxr->rx_agg_prod = prod;
962         rxr->rx_sw_agg_prod = sw_prod;
963 }
964
965 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
966                                         struct bnxt_rx_ring_info *rxr,
967                                         u16 cons, void *data, u8 *data_ptr,
968                                         dma_addr_t dma_addr,
969                                         unsigned int offset_and_len)
970 {
971         unsigned int payload = offset_and_len >> 16;
972         unsigned int len = offset_and_len & 0xffff;
973         skb_frag_t *frag;
974         struct page *page = data;
975         u16 prod = rxr->rx_prod;
976         struct sk_buff *skb;
977         int off, err;
978
979         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
980         if (unlikely(err)) {
981                 bnxt_reuse_rx_data(rxr, cons, data);
982                 return NULL;
983         }
984         dma_addr -= bp->rx_dma_offset;
985         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
986                              DMA_ATTR_WEAK_ORDERING);
987         page_pool_release_page(rxr->page_pool, page);
988
989         if (unlikely(!payload))
990                 payload = eth_get_headlen(bp->dev, data_ptr, len);
991
992         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
993         if (!skb) {
994                 __free_page(page);
995                 return NULL;
996         }
997
998         off = (void *)data_ptr - page_address(page);
999         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1000         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1001                payload + NET_IP_ALIGN);
1002
1003         frag = &skb_shinfo(skb)->frags[0];
1004         skb_frag_size_sub(frag, payload);
1005         skb_frag_off_add(frag, payload);
1006         skb->data_len -= payload;
1007         skb->tail += payload;
1008
1009         return skb;
1010 }
1011
1012 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1013                                    struct bnxt_rx_ring_info *rxr, u16 cons,
1014                                    void *data, u8 *data_ptr,
1015                                    dma_addr_t dma_addr,
1016                                    unsigned int offset_and_len)
1017 {
1018         u16 prod = rxr->rx_prod;
1019         struct sk_buff *skb;
1020         int err;
1021
1022         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1023         if (unlikely(err)) {
1024                 bnxt_reuse_rx_data(rxr, cons, data);
1025                 return NULL;
1026         }
1027
1028         skb = build_skb(data, bp->rx_buf_size);
1029         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1030                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1031         if (!skb) {
1032                 skb_free_frag(data);
1033                 return NULL;
1034         }
1035
1036         skb_reserve(skb, bp->rx_offset);
1037         skb_put(skb, offset_and_len & 0xffff);
1038         return skb;
1039 }
1040
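/* Attach the completion's aggregation buffers to @skb as page frags,
 * replenishing the aggregation ring as we go.  On allocation failure the
 * skb is dropped and the remaining buffers are recycled.
 */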
1041 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1042                                      struct bnxt_cp_ring_info *cpr,
1043                                      struct sk_buff *skb, u16 idx,
1044                                      u32 agg_bufs, bool tpa)
1045 {
1046         struct bnxt_napi *bnapi = cpr->bnapi;
1047         struct pci_dev *pdev = bp->pdev;
1048         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1049         u16 prod = rxr->rx_agg_prod;
1050         bool p5_tpa = false;
1051         u32 i;
1052
1053         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1054                 p5_tpa = true;
1055
1056         for (i = 0; i < agg_bufs; i++) {
1057                 u16 cons, frag_len;
1058                 struct rx_agg_cmp *agg;
1059                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1060                 struct page *page;
1061                 dma_addr_t mapping;
1062
1063                 if (p5_tpa)
1064                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1065                 else
1066                         agg = bnxt_get_agg(bp, cpr, idx, i);
1067                 cons = agg->rx_agg_cmp_opaque;
1068                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1069                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1070
1071                 cons_rx_buf = &rxr->rx_agg_ring[cons];
1072                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1073                                    cons_rx_buf->offset, frag_len);
1074                 __clear_bit(cons, rxr->rx_agg_bmap);
1075
1076                 /* It is possible for bnxt_alloc_rx_page() to allocate
1077                  * a sw_prod index that equals the cons index, so we
1078                  * need to clear the cons entry now.
1079                  */
1080                 mapping = cons_rx_buf->mapping;
1081                 page = cons_rx_buf->page;
1082                 cons_rx_buf->page = NULL;
1083
1084                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1085                         struct skb_shared_info *shinfo;
1086                         unsigned int nr_frags;
1087
1088                         shinfo = skb_shinfo(skb);
1089                         nr_frags = --shinfo->nr_frags;
1090                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1091
1092                         dev_kfree_skb(skb);
1093
1094                         cons_rx_buf->page = page;
1095
1096                         /* Update prod since possibly some pages have been
1097                          * allocated already.
1098                          */
1099                         rxr->rx_agg_prod = prod;
1100                         bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1101                         return NULL;
1102                 }
1103
1104                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1105                                      DMA_FROM_DEVICE,
1106                                      DMA_ATTR_WEAK_ORDERING);
1107
1108                 skb->data_len += frag_len;
1109                 skb->len += frag_len;
1110                 skb->truesize += PAGE_SIZE;
1111
1112                 prod = NEXT_RX_AGG(prod);
1113         }
1114         rxr->rx_agg_prod = prod;
1115         return skb;
1116 }
1117
1118 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1119                                u8 agg_bufs, u32 *raw_cons)
1120 {
1121         u16 last;
1122         struct rx_agg_cmp *agg;
1123
1124         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1125         last = RING_CMP(*raw_cons);
1126         agg = (struct rx_agg_cmp *)
1127                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1128         return RX_AGG_CMP_VALID(agg, *raw_cons);
1129 }
1130
1131 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1132                                             unsigned int len,
1133                                             dma_addr_t mapping)
1134 {
1135         struct bnxt *bp = bnapi->bp;
1136         struct pci_dev *pdev = bp->pdev;
1137         struct sk_buff *skb;
1138
1139         skb = napi_alloc_skb(&bnapi->napi, len);
1140         if (!skb)
1141                 return NULL;
1142
1143         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1144                                 bp->rx_dir);
1145
1146         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1147                len + NET_IP_ALIGN);
1148
1149         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1150                                    bp->rx_dir);
1151
1152         skb_put(skb, len);
1153         return skb;
1154 }
1155
1156 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1157                            u32 *raw_cons, void *cmp)
1158 {
1159         struct rx_cmp *rxcmp = cmp;
1160         u32 tmp_raw_cons = *raw_cons;
1161         u8 cmp_type, agg_bufs = 0;
1162
1163         cmp_type = RX_CMP_TYPE(rxcmp);
1164
1165         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1166                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1167                             RX_CMP_AGG_BUFS) >>
1168                            RX_CMP_AGG_BUFS_SHIFT;
1169         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1170                 struct rx_tpa_end_cmp *tpa_end = cmp;
1171
1172                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1173                         return 0;
1174
1175                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1176         }
1177
1178         if (agg_bufs) {
1179                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1180                         return -EBUSY;
1181         }
1182         *raw_cons = tmp_raw_cons;
1183         return 0;
1184 }
1185
1186 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1187 {
1188         if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1189                 return;
1190
1191         if (BNXT_PF(bp))
1192                 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1193         else
1194                 schedule_delayed_work(&bp->fw_reset_task, delay);
1195 }
1196
1197 static void bnxt_queue_sp_work(struct bnxt *bp)
1198 {
1199         if (BNXT_PF(bp))
1200                 queue_work(bnxt_pf_wq, &bp->sp_task);
1201         else
1202                 schedule_work(&bp->sp_task);
1203 }
1204
1205 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1206 {
1207         if (!rxr->bnapi->in_reset) {
1208                 rxr->bnapi->in_reset = true;
1209                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1210                         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1211                 else
1212                         set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1213                 bnxt_queue_sp_work(bp);
1214         }
1215         rxr->rx_next_cons = 0xffff;
1216 }
1217
1218 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1219 {
1220         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1221         u16 idx = agg_id & MAX_TPA_P5_MASK;
1222
1223         if (test_bit(idx, map->agg_idx_bmap))
1224                 idx = find_first_zero_bit(map->agg_idx_bmap,
1225                                           BNXT_AGG_IDX_BMAP_SIZE);
1226         __set_bit(idx, map->agg_idx_bmap);
1227         map->agg_id_tbl[agg_id] = idx;
1228         return idx;
1229 }
1230
1231 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1232 {
1233         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1234
1235         __clear_bit(idx, map->agg_idx_bmap);
1236 }
1237
1238 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1239 {
1240         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1241
1242         return map->agg_id_tbl[agg_id];
1243 }
1244
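/* Handle a TPA_START completion: stash the current RX buffer along with the
 * hash, metadata and header info in rx_tpa[agg_id] so the aggregated SKB can
 * be built at TPA_END, then refill the RX ring slot.
 */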
1245 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1246                            struct rx_tpa_start_cmp *tpa_start,
1247                            struct rx_tpa_start_cmp_ext *tpa_start1)
1248 {
1249         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1250         struct bnxt_tpa_info *tpa_info;
1251         u16 cons, prod, agg_id;
1252         struct rx_bd *prod_bd;
1253         dma_addr_t mapping;
1254
1255         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1256                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1257                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1258         } else {
1259                 agg_id = TPA_START_AGG_ID(tpa_start);
1260         }
1261         cons = tpa_start->rx_tpa_start_cmp_opaque;
1262         prod = rxr->rx_prod;
1263         cons_rx_buf = &rxr->rx_buf_ring[cons];
1264         prod_rx_buf = &rxr->rx_buf_ring[prod];
1265         tpa_info = &rxr->rx_tpa[agg_id];
1266
1267         if (unlikely(cons != rxr->rx_next_cons ||
1268                      TPA_START_ERROR(tpa_start))) {
1269                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1270                             cons, rxr->rx_next_cons,
1271                             TPA_START_ERROR_CODE(tpa_start1));
1272                 bnxt_sched_reset(bp, rxr);
1273                 return;
1274         }
1275         /* Store cfa_code in tpa_info to use in tpa_end
1276          * completion processing.
1277          */
1278         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1279         prod_rx_buf->data = tpa_info->data;
1280         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1281
1282         mapping = tpa_info->mapping;
1283         prod_rx_buf->mapping = mapping;
1284
1285         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1286
1287         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1288
1289         tpa_info->data = cons_rx_buf->data;
1290         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1291         cons_rx_buf->data = NULL;
1292         tpa_info->mapping = cons_rx_buf->mapping;
1293
1294         tpa_info->len =
1295                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1296                                 RX_TPA_START_CMP_LEN_SHIFT;
1297         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1298                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1299
1300                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1301                 tpa_info->gso_type = SKB_GSO_TCPV4;
1302                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1303                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1304                         tpa_info->gso_type = SKB_GSO_TCPV6;
1305                 tpa_info->rss_hash =
1306                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1307         } else {
1308                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1309                 tpa_info->gso_type = 0;
1310                 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1311         }
1312         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1313         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1314         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1315         tpa_info->agg_count = 0;
1316
1317         rxr->rx_prod = NEXT_RX(prod);
1318         cons = NEXT_RX(cons);
1319         rxr->rx_next_cons = NEXT_RX(cons);
1320         cons_rx_buf = &rxr->rx_buf_ring[cons];
1321
1322         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1323         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1324         cons_rx_buf->data = NULL;
1325 }
1326
1327 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1328 {
1329         if (agg_bufs)
1330                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1331 }
1332
1333 #ifdef CONFIG_INET
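/* For aggregated packets received inside a UDP tunnel, mark the skb with
 * the matching GSO tunnel type.  A non-zero UDP checksum in the outer
 * header selects the checksum-capable tunnel flavor.
 */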
1334 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1335 {
1336         struct udphdr *uh = NULL;
1337
1338         if (ip_proto == htons(ETH_P_IP)) {
1339                 struct iphdr *iph = (struct iphdr *)skb->data;
1340
1341                 if (iph->protocol == IPPROTO_UDP)
1342                         uh = (struct udphdr *)(iph + 1);
1343         } else {
1344                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1345
1346                 if (iph->nexthdr == IPPROTO_UDP)
1347                         uh = (struct udphdr *)(iph + 1);
1348         }
1349         if (uh) {
1350                 if (uh->check)
1351                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1352                 else
1353                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1354         }
1355 }
1356 #endif
1357
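/* GRO completion fixup for 5731x-class chips: the hardware reports the
 * inner/outer header offsets in hdr_info, so the L3/L4 headers can be
 * located without parsing.  Internal loopback frames carry an extra
 * 4 bytes, detected below and compensated by shifting all offsets.
 */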
1358 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1359                                            int payload_off, int tcp_ts,
1360                                            struct sk_buff *skb)
1361 {
1362 #ifdef CONFIG_INET
1363         struct tcphdr *th;
1364         int len, nw_off;
1365         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1366         u32 hdr_info = tpa_info->hdr_info;
1367         bool loopback = false;
1368
1369         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1370         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1371         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1372
1373         /* If the packet is an internal loopback packet, the offsets will
1374          * have an extra 4 bytes.
1375          */
1376         if (inner_mac_off == 4) {
1377                 loopback = true;
1378         } else if (inner_mac_off > 4) {
1379                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1380                                             ETH_HLEN - 2));
1381
1382                 /* We only support inner IPv4/IPv6.  If we don't see the
1383                  * correct protocol ID, it must be a loopback packet where
1384                  * the offsets are off by 4.
1385                  */
1386                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1387                         loopback = true;
1388         }
1389         if (loopback) {
1390                 /* internal loopback packet, subtract 4 from all offsets */
1391                 inner_ip_off -= 4;
1392                 inner_mac_off -= 4;
1393                 outer_ip_off -= 4;
1394         }
1395
1396         nw_off = inner_ip_off - ETH_HLEN;
1397         skb_set_network_header(skb, nw_off);
1398         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1399                 struct ipv6hdr *iph = ipv6_hdr(skb);
1400
1401                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1402                 len = skb->len - skb_transport_offset(skb);
1403                 th = tcp_hdr(skb);
1404                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1405         } else {
1406                 struct iphdr *iph = ip_hdr(skb);
1407
1408                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1409                 len = skb->len - skb_transport_offset(skb);
1410                 th = tcp_hdr(skb);
1411                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1412         }
1413
1414         if (inner_mac_off) { /* tunnel */
1415                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1416                                             ETH_HLEN - 2));
1417
1418                 bnxt_gro_tunnel(skb, proto);
1419         }
1420 #endif
1421         return skb;
1422 }
1423
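/* GRO completion fixup for 5750x (P5) chips: header offsets also come
 * from hdr_info, but the TCP pseudo-header checksum is not recomputed
 * here (presumably already handled by the hardware), so only the
 * network and transport headers are set.
 */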
1424 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1425                                            int payload_off, int tcp_ts,
1426                                            struct sk_buff *skb)
1427 {
1428 #ifdef CONFIG_INET
1429         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1430         u32 hdr_info = tpa_info->hdr_info;
1431         int iphdr_len, nw_off;
1432
1433         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1434         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1435         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1436
1437         nw_off = inner_ip_off - ETH_HLEN;
1438         skb_set_network_header(skb, nw_off);
1439         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1440                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1441         skb_set_transport_header(skb, nw_off + iphdr_len);
1442
1443         if (inner_mac_off) { /* tunnel */
1444                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1445                                             ETH_HLEN - 2));
1446
1447                 bnxt_gro_tunnel(skb, proto);
1448         }
1449 #endif
1450         return skb;
1451 }
1452
1453 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1454 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1455
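/* GRO completion fixup for 5730x-class chips: no header offsets are
 * provided, so the inner headers are derived from the payload offset
 * reported in the TPA end completion, adjusted for TCP timestamp
 * options when present.
 */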
1456 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1457                                            int payload_off, int tcp_ts,
1458                                            struct sk_buff *skb)
1459 {
1460 #ifdef CONFIG_INET
1461         struct tcphdr *th;
1462         int len, nw_off, tcp_opt_len = 0;
1463
1464         if (tcp_ts)
1465                 tcp_opt_len = 12;
1466
1467         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1468                 struct iphdr *iph;
1469
1470                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1471                          ETH_HLEN;
1472                 skb_set_network_header(skb, nw_off);
1473                 iph = ip_hdr(skb);
1474                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1475                 len = skb->len - skb_transport_offset(skb);
1476                 th = tcp_hdr(skb);
1477                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1478         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1479                 struct ipv6hdr *iph;
1480
1481                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1482                          ETH_HLEN;
1483                 skb_set_network_header(skb, nw_off);
1484                 iph = ipv6_hdr(skb);
1485                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1486                 len = skb->len - skb_transport_offset(skb);
1487                 th = tcp_hdr(skb);
1488                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1489         } else {
1490                 dev_kfree_skb_any(skb);
1491                 return NULL;
1492         }
1493
1494         if (nw_off) /* tunnel */
1495                 bnxt_gro_tunnel(skb, skb->protocol);
1496 #endif
1497         return skb;
1498 }
1499
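/* Finish GRO processing of an aggregated (TPA) packet: set the GRO
 * segment count and gso_size, then call the chip-specific gro_func to
 * fix up the headers before handing the skb to tcp_gro_complete().
 */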
1500 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1501                                            struct bnxt_tpa_info *tpa_info,
1502                                            struct rx_tpa_end_cmp *tpa_end,
1503                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1504                                            struct sk_buff *skb)
1505 {
1506 #ifdef CONFIG_INET
1507         int payload_off;
1508         u16 segs;
1509
1510         segs = TPA_END_TPA_SEGS(tpa_end);
1511         if (segs == 1)
1512                 return skb;
1513
1514         NAPI_GRO_CB(skb)->count = segs;
1515         skb_shinfo(skb)->gso_size =
1516                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1517         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1518         if (bp->flags & BNXT_FLAG_CHIP_P5)
1519                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1520         else
1521                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1522         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1523         if (likely(skb))
1524                 tcp_gro_complete(skb);
1525 #endif
1526         return skb;
1527 }
1528
1529 /* Given the cfa_code of a received packet, determine which
1530  * netdev (vf-rep or PF) the packet is destined for.
1531  */
1532 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1533 {
1534         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1535
1536         /* if vf-rep dev is NULL, the packet must belong to the PF */
1537         return dev ? dev : bp->dev;
1538 }
1539
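/* Handle a TPA end completion: collect the aggregation buffers, copy or
 * build the skb for the aggregated packet, apply the RSS hash, VLAN and
 * checksum metadata saved at TPA start, and optionally run the GRO
 * fixups.  Returns NULL when the packet is aborted, or an ERR_PTR when
 * the completion ring does not yet contain all aggregation entries.
 */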
1540 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1541                                            struct bnxt_cp_ring_info *cpr,
1542                                            u32 *raw_cons,
1543                                            struct rx_tpa_end_cmp *tpa_end,
1544                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1545                                            u8 *event)
1546 {
1547         struct bnxt_napi *bnapi = cpr->bnapi;
1548         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1549         u8 *data_ptr, agg_bufs;
1550         unsigned int len;
1551         struct bnxt_tpa_info *tpa_info;
1552         dma_addr_t mapping;
1553         struct sk_buff *skb;
1554         u16 idx = 0, agg_id;
1555         void *data;
1556         bool gro;
1557
1558         if (unlikely(bnapi->in_reset)) {
1559                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1560
1561                 if (rc < 0)
1562                         return ERR_PTR(-EBUSY);
1563                 return NULL;
1564         }
1565
1566         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1567                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1568                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1569                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1570                 tpa_info = &rxr->rx_tpa[agg_id];
1571                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1572                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1573                                     agg_bufs, tpa_info->agg_count);
1574                         agg_bufs = tpa_info->agg_count;
1575                 }
1576                 tpa_info->agg_count = 0;
1577                 *event |= BNXT_AGG_EVENT;
1578                 bnxt_free_agg_idx(rxr, agg_id);
1579                 idx = agg_id;
1580                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1581         } else {
1582                 agg_id = TPA_END_AGG_ID(tpa_end);
1583                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1584                 tpa_info = &rxr->rx_tpa[agg_id];
1585                 idx = RING_CMP(*raw_cons);
1586                 if (agg_bufs) {
1587                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1588                                 return ERR_PTR(-EBUSY);
1589
1590                         *event |= BNXT_AGG_EVENT;
1591                         idx = NEXT_CMP(idx);
1592                 }
1593                 gro = !!TPA_END_GRO(tpa_end);
1594         }
1595         data = tpa_info->data;
1596         data_ptr = tpa_info->data_ptr;
1597         prefetch(data_ptr);
1598         len = tpa_info->len;
1599         mapping = tpa_info->mapping;
1600
1601         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1602                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1603                 if (agg_bufs > MAX_SKB_FRAGS)
1604                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1605                                     agg_bufs, (int)MAX_SKB_FRAGS);
1606                 return NULL;
1607         }
1608
1609         if (len <= bp->rx_copy_thresh) {
1610                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1611                 if (!skb) {
1612                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1613                         cpr->sw_stats.rx.rx_oom_discards += 1;
1614                         return NULL;
1615                 }
1616         } else {
1617                 u8 *new_data;
1618                 dma_addr_t new_mapping;
1619
1620                 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1621                 if (!new_data) {
1622                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1623                         cpr->sw_stats.rx.rx_oom_discards += 1;
1624                         return NULL;
1625                 }
1626
1627                 tpa_info->data = new_data;
1628                 tpa_info->data_ptr = new_data + bp->rx_offset;
1629                 tpa_info->mapping = new_mapping;
1630
1631                 skb = build_skb(data, bp->rx_buf_size);
1632                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1633                                        bp->rx_buf_use_size, bp->rx_dir,
1634                                        DMA_ATTR_WEAK_ORDERING);
1635
1636                 if (!skb) {
1637                         skb_free_frag(data);
1638                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1639                         cpr->sw_stats.rx.rx_oom_discards += 1;
1640                         return NULL;
1641                 }
1642                 skb_reserve(skb, bp->rx_offset);
1643                 skb_put(skb, len);
1644         }
1645
1646         if (agg_bufs) {
1647                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1648                 if (!skb) {
1649                         /* Page reuse already handled by bnxt_rx_pages(). */
1650                         cpr->sw_stats.rx.rx_oom_discards += 1;
1651                         return NULL;
1652                 }
1653         }
1654
1655         skb->protocol =
1656                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1657
1658         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1659                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1660
1661         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1662             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1663                 __be16 vlan_proto = htons(tpa_info->metadata >>
1664                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1665                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1666
1667                 if (eth_type_vlan(vlan_proto)) {
1668                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1669                 } else {
1670                         dev_kfree_skb(skb);
1671                         return NULL;
1672                 }
1673         }
1674
1675         skb_checksum_none_assert(skb);
1676         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1677                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1678                 skb->csum_level =
1679                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1680         }
1681
1682         if (gro)
1683                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1684
1685         return skb;
1686 }
1687
1688 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1689                          struct rx_agg_cmp *rx_agg)
1690 {
1691         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1692         struct bnxt_tpa_info *tpa_info;
1693
1694         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1695         tpa_info = &rxr->rx_tpa[agg_id];
1696         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1697         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1698 }
1699
1700 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1701                              struct sk_buff *skb)
1702 {
1703         if (skb->dev != bp->dev) {
1704                 /* this packet belongs to a vf-rep */
1705                 bnxt_vf_rep_rx(bp, skb);
1706                 return;
1707         }
1708         skb_record_rx_queue(skb, bnapi->index);
1709         napi_gro_receive(&bnapi->napi, skb);
1710 }
1711
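/* bnxt_rx_pkt() is the main RX completion handler.  It consumes one or
 * two completion ring entries (plus any aggregation entries), recycles
 * or replaces the RX buffer, and delivers the resulting skb via NAPI.
 */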
1712 /* returns the following:
1713  * 1       - 1 packet successfully received
1714  * 0       - successful TPA_START, packet not completed yet
1715  * -EBUSY  - completion ring does not have all the agg buffers yet
1716  * -ENOMEM - packet aborted due to out of memory
1717  * -EIO    - packet aborted due to hw error indicated in BD
1718  */
1719 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1720                        u32 *raw_cons, u8 *event)
1721 {
1722         struct bnxt_napi *bnapi = cpr->bnapi;
1723         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1724         struct net_device *dev = bp->dev;
1725         struct rx_cmp *rxcmp;
1726         struct rx_cmp_ext *rxcmp1;
1727         u32 tmp_raw_cons = *raw_cons;
1728         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1729         struct bnxt_sw_rx_bd *rx_buf;
1730         unsigned int len;
1731         u8 *data_ptr, agg_bufs, cmp_type;
1732         dma_addr_t dma_addr;
1733         struct sk_buff *skb;
1734         u32 flags, misc;
1735         void *data;
1736         int rc = 0;
1737
1738         rxcmp = (struct rx_cmp *)
1739                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1740
1741         cmp_type = RX_CMP_TYPE(rxcmp);
1742
1743         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1744                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1745                 goto next_rx_no_prod_no_len;
1746         }
1747
1748         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1749         cp_cons = RING_CMP(tmp_raw_cons);
1750         rxcmp1 = (struct rx_cmp_ext *)
1751                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1752
1753         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1754                 return -EBUSY;
1755
1756         /* The valid test of the entry must be done first before
1757          * reading any further.
1758          */
1759         dma_rmb();
1760         prod = rxr->rx_prod;
1761
1762         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1763                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1764                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1765
1766                 *event |= BNXT_RX_EVENT;
1767                 goto next_rx_no_prod_no_len;
1768
1769         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1770                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1771                                    (struct rx_tpa_end_cmp *)rxcmp,
1772                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1773
1774                 if (IS_ERR(skb))
1775                         return -EBUSY;
1776
1777                 rc = -ENOMEM;
1778                 if (likely(skb)) {
1779                         bnxt_deliver_skb(bp, bnapi, skb);
1780                         rc = 1;
1781                 }
1782                 *event |= BNXT_RX_EVENT;
1783                 goto next_rx_no_prod_no_len;
1784         }
1785
1786         cons = rxcmp->rx_cmp_opaque;
1787         if (unlikely(cons != rxr->rx_next_cons)) {
1788                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1789
1790                 /* 0xffff is forced error, don't print it */
1791                 if (rxr->rx_next_cons != 0xffff)
1792                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1793                                     cons, rxr->rx_next_cons);
1794                 bnxt_sched_reset(bp, rxr);
1795                 if (rc1)
1796                         return rc1;
1797                 goto next_rx_no_prod_no_len;
1798         }
1799         rx_buf = &rxr->rx_buf_ring[cons];
1800         data = rx_buf->data;
1801         data_ptr = rx_buf->data_ptr;
1802         prefetch(data_ptr);
1803
1804         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1805         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1806
1807         if (agg_bufs) {
1808                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1809                         return -EBUSY;
1810
1811                 cp_cons = NEXT_CMP(cp_cons);
1812                 *event |= BNXT_AGG_EVENT;
1813         }
1814         *event |= BNXT_RX_EVENT;
1815
1816         rx_buf->data = NULL;
1817         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1818                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1819
1820                 bnxt_reuse_rx_data(rxr, cons, data);
1821                 if (agg_bufs)
1822                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1823                                                false);
1824
1825                 rc = -EIO;
1826                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1827                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1828                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1829                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1830                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1831                                                  rx_err);
1832                                 bnxt_sched_reset(bp, rxr);
1833                         }
1834                 }
1835                 goto next_rx_no_len;
1836         }
1837
1838         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1839         len = flags >> RX_CMP_LEN_SHIFT;
1840         dma_addr = rx_buf->mapping;
1841
1842         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1843                 rc = 1;
1844                 goto next_rx;
1845         }
1846
1847         if (len <= bp->rx_copy_thresh) {
1848                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1849                 bnxt_reuse_rx_data(rxr, cons, data);
1850                 if (!skb) {
1851                         if (agg_bufs)
1852                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1853                                                        agg_bufs, false);
1854                         cpr->sw_stats.rx.rx_oom_discards += 1;
1855                         rc = -ENOMEM;
1856                         goto next_rx;
1857                 }
1858         } else {
1859                 u32 payload;
1860
1861                 if (rx_buf->data_ptr == data_ptr)
1862                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1863                 else
1864                         payload = 0;
1865                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1866                                       payload | len);
1867                 if (!skb) {
1868                         cpr->sw_stats.rx.rx_oom_discards += 1;
1869                         rc = -ENOMEM;
1870                         goto next_rx;
1871                 }
1872         }
1873
1874         if (agg_bufs) {
1875                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1876                 if (!skb) {
1877                         cpr->sw_stats.rx.rx_oom_discards += 1;
1878                         rc = -ENOMEM;
1879                         goto next_rx;
1880                 }
1881         }
1882
1883         if (RX_CMP_HASH_VALID(rxcmp)) {
1884                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1885                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1886
1887                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1888                 if (hash_type != 1 && hash_type != 3)
1889                         type = PKT_HASH_TYPE_L3;
1890                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1891         }
1892
1893         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1894         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1895
1896         if ((rxcmp1->rx_cmp_flags2 &
1897              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1898             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1899                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1900                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1901                 __be16 vlan_proto = htons(meta_data >>
1902                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1903
1904                 if (eth_type_vlan(vlan_proto)) {
1905                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1906                 } else {
1907                         dev_kfree_skb(skb);
1908                         goto next_rx;
1909                 }
1910         }
1911
1912         skb_checksum_none_assert(skb);
1913         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1914                 if (dev->features & NETIF_F_RXCSUM) {
1915                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1916                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1917                 }
1918         } else {
1919                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1920                         if (dev->features & NETIF_F_RXCSUM)
1921                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1922                 }
1923         }
1924
1925         if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1926                      RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1927                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1928                         u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1929                         u64 ns, ts;
1930
1931                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1932                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1933
1934                                 spin_lock_bh(&ptp->ptp_lock);
1935                                 ns = timecounter_cyc2time(&ptp->tc, ts);
1936                                 spin_unlock_bh(&ptp->ptp_lock);
1937                                 memset(skb_hwtstamps(skb), 0,
1938                                        sizeof(*skb_hwtstamps(skb)));
1939                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1940                         }
1941                 }
1942         }
1943         bnxt_deliver_skb(bp, bnapi, skb);
1944         rc = 1;
1945
1946 next_rx:
1947         cpr->rx_packets += 1;
1948         cpr->rx_bytes += len;
1949
1950 next_rx_no_len:
1951         rxr->rx_prod = NEXT_RX(prod);
1952         rxr->rx_next_cons = NEXT_RX(cons);
1953
1954 next_rx_no_prod_no_len:
1955         *raw_cons = tmp_raw_cons;
1956
1957         return rc;
1958 }
1959
1960 /* In netpoll mode, if we are using a combined completion ring, we need to
1961  * discard the rx packets and recycle the buffers.
1962  */
1963 static int bnxt_force_rx_discard(struct bnxt *bp,
1964                                  struct bnxt_cp_ring_info *cpr,
1965                                  u32 *raw_cons, u8 *event)
1966 {
1967         u32 tmp_raw_cons = *raw_cons;
1968         struct rx_cmp_ext *rxcmp1;
1969         struct rx_cmp *rxcmp;
1970         u16 cp_cons;
1971         u8 cmp_type;
1972         int rc;
1973
1974         cp_cons = RING_CMP(tmp_raw_cons);
1975         rxcmp = (struct rx_cmp *)
1976                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1977
1978         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1979         cp_cons = RING_CMP(tmp_raw_cons);
1980         rxcmp1 = (struct rx_cmp_ext *)
1981                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1982
1983         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1984                 return -EBUSY;
1985
1986         /* The valid test of the entry must be done first before
1987          * reading any further.
1988          */
1989         dma_rmb();
1990         cmp_type = RX_CMP_TYPE(rxcmp);
1991         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1992                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1993                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1994         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1995                 struct rx_tpa_end_cmp_ext *tpa_end1;
1996
1997                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1998                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1999                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2000         }
2001         rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2002         if (rc && rc != -EBUSY)
2003                 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2004         return rc;
2005 }
2006
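/* Read one of the firmware health/status registers.  Depending on the
 * register type encoded in the register address, the value comes from
 * PCI config space, a GRC register remapped into a BAR0 window, or
 * directly from BAR0/BAR1.
 */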
2007 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2008 {
2009         struct bnxt_fw_health *fw_health = bp->fw_health;
2010         u32 reg = fw_health->regs[reg_idx];
2011         u32 reg_type, reg_off, val = 0;
2012
2013         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2014         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2015         switch (reg_type) {
2016         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2017                 pci_read_config_dword(bp->pdev, reg_off, &val);
2018                 break;
2019         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2020                 reg_off = fw_health->mapped_regs[reg_idx];
2021                 fallthrough;
2022         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2023                 val = readl(bp->bar0 + reg_off);
2024                 break;
2025         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2026                 val = readl(bp->bar1 + reg_off);
2027                 break;
2028         }
2029         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2030                 val &= fw_health->fw_reset_inprog_reg_mask;
2031         return val;
2032 }
2033
2034 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2035 {
2036         int i;
2037
2038         for (i = 0; i < bp->rx_nr_rings; i++) {
2039                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2040                 struct bnxt_ring_grp_info *grp_info;
2041
2042                 grp_info = &bp->grp_info[grp_idx];
2043                 if (grp_info->agg_fw_ring_id == ring_id)
2044                         return grp_idx;
2045         }
2046         return INVALID_HW_RING_ID;
2047 }
2048
2049 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2050 {
2051         u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2052
2053         switch (err_type) {
2054         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2055                 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2056                            BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2057                 break;
2058         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2059                 netdev_warn(bp->dev, "Pause Storm detected!\n");
2060                 break;
2061         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2062                 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2063                 break;
2064         default:
2065                 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2066                            err_type);
2067                 break;
2068         }
2069 }
2070
2071 #define BNXT_GET_EVENT_PORT(data)       \
2072         ((data) &                       \
2073          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2074
2075 #define BNXT_EVENT_RING_TYPE(data2)     \
2076         ((data2) &                      \
2077          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2078
2079 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2080         (BNXT_EVENT_RING_TYPE(data2) == \
2081          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2082
2083 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)        \
2084         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2085          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2086
2087 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)        \
2088         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2089          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2090
2091 #define BNXT_PHC_BITS   48
2092
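/* Dispatch a firmware asynchronous event.  Most events just set a bit in
 * bp->sp_event and queue the slow-path work; a few (error recovery, PHC
 * updates, echo requests) are handled inline.  For PHC RTC updates the
 * full time is rebuilt from the lower BNXT_PHC_BITS (48) bits kept in
 * ptp->current_time and the upper bits carried in event data1, roughly:
 * ns = (msb << 48) | current_time.
 */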
2093 static int bnxt_async_event_process(struct bnxt *bp,
2094                                     struct hwrm_async_event_cmpl *cmpl)
2095 {
2096         u16 event_id = le16_to_cpu(cmpl->event_id);
2097         u32 data1 = le32_to_cpu(cmpl->event_data1);
2098         u32 data2 = le32_to_cpu(cmpl->event_data2);
2099
2100         netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2101                    event_id, data1, data2);
2102
2103         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2104         switch (event_id) {
2105         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2106                 struct bnxt_link_info *link_info = &bp->link_info;
2107
2108                 if (BNXT_VF(bp))
2109                         goto async_event_process_exit;
2110
2111                 /* print unsupported speed warning in forced speed mode only */
2112                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2113                     (data1 & 0x20000)) {
2114                         u16 fw_speed = link_info->force_link_speed;
2115                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2116
2117                         if (speed != SPEED_UNKNOWN)
2118                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2119                                             speed);
2120                 }
2121                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2122         }
2123                 fallthrough;
2124         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2125         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2126                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2127                 fallthrough;
2128         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2129                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2130                 break;
2131         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2132                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2133                 break;
2134         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2135                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2136
2137                 if (BNXT_VF(bp))
2138                         break;
2139
2140                 if (bp->pf.port_id != port_id)
2141                         break;
2142
2143                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2144                 break;
2145         }
2146         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2147                 if (BNXT_PF(bp))
2148                         goto async_event_process_exit;
2149                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2150                 break;
2151         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2152                 char *type_str = "Solicited";
2153
2154                 if (!bp->fw_health)
2155                         goto async_event_process_exit;
2156
2157                 bp->fw_reset_timestamp = jiffies;
2158                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2159                 if (!bp->fw_reset_min_dsecs)
2160                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2161                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2162                 if (!bp->fw_reset_max_dsecs)
2163                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2164                 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2165                         set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2166                 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2167                         type_str = "Fatal";
2168                         bp->fw_health->fatalities++;
2169                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2170                 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2171                            EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2172                         type_str = "Non-fatal";
2173                         bp->fw_health->survivals++;
2174                         set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2175                 }
2176                 netif_warn(bp, hw, bp->dev,
2177                            "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2178                            type_str, data1, data2,
2179                            bp->fw_reset_min_dsecs * 100,
2180                            bp->fw_reset_max_dsecs * 100);
2181                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2182                 break;
2183         }
2184         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2185                 struct bnxt_fw_health *fw_health = bp->fw_health;
2186                 char *status_desc = "healthy";
2187                 u32 status;
2188
2189                 if (!fw_health)
2190                         goto async_event_process_exit;
2191
2192                 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2193                         fw_health->enabled = false;
2194                         netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2195                         break;
2196                 }
2197                 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2198                 fw_health->tmr_multiplier =
2199                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2200                                      bp->current_interval * 10);
2201                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2202                 if (!fw_health->enabled)
2203                         fw_health->last_fw_heartbeat =
2204                                 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2205                 fw_health->last_fw_reset_cnt =
2206                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2207                 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2208                 if (status != BNXT_FW_STATUS_HEALTHY)
2209                         status_desc = "unhealthy";
2210                 netif_info(bp, drv, bp->dev,
2211                            "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2212                            fw_health->primary ? "primary" : "backup", status,
2213                            status_desc, fw_health->last_fw_reset_cnt);
2214                 if (!fw_health->enabled) {
2215                         /* Make sure tmr_counter is set and visible to
2216                          * bnxt_health_check() before setting enabled to true.
2217                          */
2218                         smp_wmb();
2219                         fw_health->enabled = true;
2220                 }
2221                 goto async_event_process_exit;
2222         }
2223         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2224                 netif_notice(bp, hw, bp->dev,
2225                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2226                              data1, data2);
2227                 goto async_event_process_exit;
2228         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2229                 struct bnxt_rx_ring_info *rxr;
2230                 u16 grp_idx;
2231
2232                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2233                         goto async_event_process_exit;
2234
2235                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2236                             BNXT_EVENT_RING_TYPE(data2), data1);
2237                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2238                         goto async_event_process_exit;
2239
2240                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2241                 if (grp_idx == INVALID_HW_RING_ID) {
2242                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2243                                     data1);
2244                         goto async_event_process_exit;
2245                 }
2246                 rxr = bp->bnapi[grp_idx]->rx_ring;
2247                 bnxt_sched_reset(bp, rxr);
2248                 goto async_event_process_exit;
2249         }
2250         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2251                 struct bnxt_fw_health *fw_health = bp->fw_health;
2252
2253                 netif_notice(bp, hw, bp->dev,
2254                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2255                              data1, data2);
2256                 if (fw_health) {
2257                         fw_health->echo_req_data1 = data1;
2258                         fw_health->echo_req_data2 = data2;
2259                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2260                         break;
2261                 }
2262                 goto async_event_process_exit;
2263         }
2264         case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2265                 bnxt_ptp_pps_event(bp, data1, data2);
2266                 goto async_event_process_exit;
2267         }
2268         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2269                 bnxt_event_error_report(bp, data1, data2);
2270                 goto async_event_process_exit;
2271         }
2272         case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2273                 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2274                 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2275                         if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
2276                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2277                                 u64 ns;
2278
2279                                 spin_lock_bh(&ptp->ptp_lock);
2280                                 bnxt_ptp_update_current_time(bp);
2281                                 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2282                                        BNXT_PHC_BITS) | ptp->current_time);
2283                                 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2284                                 spin_unlock_bh(&ptp->ptp_lock);
2285                         }
2286                         break;
2287                 }
2288                 goto async_event_process_exit;
2289         }
2290         case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2291                 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2292
2293                 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2294                 goto async_event_process_exit;
2295         }
2296         default:
2297                 goto async_event_process_exit;
2298         }
2299         bnxt_queue_sp_work(bp);
2300 async_event_process_exit:
2301         bnxt_ulp_async_events(bp, cmpl);
2302         return 0;
2303 }
2304
2305 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2306 {
2307         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2308         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2309         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2310                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2311
2312         switch (cmpl_type) {
2313         case CMPL_BASE_TYPE_HWRM_DONE:
2314                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2315                 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2316                 break;
2317
2318         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2319                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2320
2321                 if ((vf_id < bp->pf.first_vf_id) ||
2322                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2323                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2324                                    vf_id);
2325                         return -EINVAL;
2326                 }
2327
2328                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2329                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2330                 bnxt_queue_sp_work(bp);
2331                 break;
2332
2333         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2334                 bnxt_async_event_process(bp,
2335                                          (struct hwrm_async_event_cmpl *)txcmp);
2336                 break;
2337
2338         default:
2339                 break;
2340         }
2341
2342         return 0;
2343 }
2344
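/* MSI-X interrupt handler: just count the event, prefetch the next
 * completion descriptor and schedule NAPI; all real work happens in the
 * poll routines below.
 */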
2345 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2346 {
2347         struct bnxt_napi *bnapi = dev_instance;
2348         struct bnxt *bp = bnapi->bp;
2349         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2350         u32 cons = RING_CMP(cpr->cp_raw_cons);
2351
2352         cpr->event_ctr++;
2353         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2354         napi_schedule(&bnapi->napi);
2355         return IRQ_HANDLED;
2356 }
2357
2358 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2359 {
2360         u32 raw_cons = cpr->cp_raw_cons;
2361         u16 cons = RING_CMP(raw_cons);
2362         struct tx_cmp *txcmp;
2363
2364         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2365
2366         return TX_CMP_VALID(txcmp, raw_cons);
2367 }
2368
2369 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2370 {
2371         struct bnxt_napi *bnapi = dev_instance;
2372         struct bnxt *bp = bnapi->bp;
2373         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2374         u32 cons = RING_CMP(cpr->cp_raw_cons);
2375         u32 int_status;
2376
2377         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2378
2379         if (!bnxt_has_work(bp, cpr)) {
2380                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2381                 /* return if erroneous interrupt */
2382                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2383                         return IRQ_NONE;
2384         }
2385
2386         /* disable ring IRQ */
2387         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2388
2389         /* Return here if interrupt is shared and is disabled. */
2390         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2391                 return IRQ_HANDLED;
2392
2393         napi_schedule(&bnapi->napi);
2394         return IRQ_HANDLED;
2395 }
2396
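/* Core NAPI poll loop for a completion ring.  TX, RX and HWRM/async
 * completions share the ring; TX completions are only counted here and
 * reaped later in __bnxt_poll_work_done(), while RX completions are
 * processed up to the NAPI budget (or force-discarded when budget is 0,
 * i.e. netpoll).
 */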
2397 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2398                             int budget)
2399 {
2400         struct bnxt_napi *bnapi = cpr->bnapi;
2401         u32 raw_cons = cpr->cp_raw_cons;
2402         u32 cons;
2403         int tx_pkts = 0;
2404         int rx_pkts = 0;
2405         u8 event = 0;
2406         struct tx_cmp *txcmp;
2407
2408         cpr->has_more_work = 0;
2409         cpr->had_work_done = 1;
2410         while (1) {
2411                 int rc;
2412
2413                 cons = RING_CMP(raw_cons);
2414                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2415
2416                 if (!TX_CMP_VALID(txcmp, raw_cons))
2417                         break;
2418
2419                 /* The valid test of the entry must be done first before
2420                  * reading any further.
2421                  */
2422                 dma_rmb();
2423                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2424                         tx_pkts++;
2425                         /* return full budget so NAPI will complete. */
2426                         if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2427                                 rx_pkts = budget;
2428                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2429                                 if (budget)
2430                                         cpr->has_more_work = 1;
2431                                 break;
2432                         }
2433                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2434                         if (likely(budget))
2435                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2436                         else
2437                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2438                                                            &event);
2439                         if (likely(rc >= 0))
2440                                 rx_pkts += rc;
2441                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2442                          * the NAPI budget.  Otherwise, we may potentially loop
2443                          * here forever if we consistently cannot allocate
2444                          * buffers.
2445                          */
2446                         else if (rc == -ENOMEM && budget)
2447                                 rx_pkts++;
2448                         else if (rc == -EBUSY)  /* partial completion */
2449                                 break;
2450                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2451                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2452                                     (TX_CMP_TYPE(txcmp) ==
2453                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2454                                     (TX_CMP_TYPE(txcmp) ==
2455                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2456                         bnxt_hwrm_handler(bp, txcmp);
2457                 }
2458                 raw_cons = NEXT_RAW_CMP(raw_cons);
2459
2460                 if (rx_pkts && rx_pkts == budget) {
2461                         cpr->has_more_work = 1;
2462                         break;
2463                 }
2464         }
2465
2466         if (event & BNXT_REDIRECT_EVENT)
2467                 xdp_do_flush();
2468
2469         if (event & BNXT_TX_EVENT) {
2470                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2471                 u16 prod = txr->tx_prod;
2472
2473                 /* Sync BD data before updating doorbell */
2474                 wmb();
2475
2476                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2477         }
2478
2479         cpr->cp_raw_cons = raw_cons;
2480         bnapi->tx_pkts += tx_pkts;
2481         bnapi->events |= event;
2482         return rx_pkts;
2483 }
2484
2485 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2486 {
2487         if (bnapi->tx_pkts) {
2488                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2489                 bnapi->tx_pkts = 0;
2490         }
2491
2492         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2493                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2494
2495                 if (bnapi->events & BNXT_AGG_EVENT)
2496                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2497                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2498         }
2499         bnapi->events = 0;
2500 }
2501
2502 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2503                           int budget)
2504 {
2505         struct bnxt_napi *bnapi = cpr->bnapi;
2506         int rx_pkts;
2507
2508         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2509
2510         /* ACK completion ring before freeing tx ring and producing new
2511          * buffers in rx/agg rings to prevent overflowing the completion
2512          * ring.
2513          */
2514         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2515
2516         __bnxt_poll_work_done(bp, bnapi);
2517         return rx_pkts;
2518 }
2519
2520 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2521 {
2522         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2523         struct bnxt *bp = bnapi->bp;
2524         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2525         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2526         struct tx_cmp *txcmp;
2527         struct rx_cmp_ext *rxcmp1;
2528         u32 cp_cons, tmp_raw_cons;
2529         u32 raw_cons = cpr->cp_raw_cons;
2530         u32 rx_pkts = 0;
2531         u8 event = 0;
2532
2533         while (1) {
2534                 int rc;
2535
2536                 cp_cons = RING_CMP(raw_cons);
2537                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2538
2539                 if (!TX_CMP_VALID(txcmp, raw_cons))
2540                         break;
2541
2542                 /* The valid test of the entry must be done first before
2543                  * reading any further.
2544                  */
2545                 dma_rmb();
2546                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2547                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2548                         cp_cons = RING_CMP(tmp_raw_cons);
2549                         rxcmp1 = (struct rx_cmp_ext *)
2550                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2551
2552                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2553                                 break;
2554
2555                         /* force an error to recycle the buffer */
2556                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2557                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2558
2559                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2560                         if (likely(rc == -EIO) && budget)
2561                                 rx_pkts++;
2562                         else if (rc == -EBUSY)  /* partial completion */
2563                                 break;
2564                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2565                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2566                         bnxt_hwrm_handler(bp, txcmp);
2567                 } else {
2568                         netdev_err(bp->dev,
2569                                    "Invalid completion received on special ring\n");
2570                 }
2571                 raw_cons = NEXT_RAW_CMP(raw_cons);
2572
2573                 if (rx_pkts == budget)
2574                         break;
2575         }
2576
2577         cpr->cp_raw_cons = raw_cons;
2578         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2579         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2580
2581         if (event & BNXT_AGG_EVENT)
2582                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2583
2584         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2585                 napi_complete_done(napi, rx_pkts);
2586                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2587         }
2588         return rx_pkts;
2589 }
2590
2591 static int bnxt_poll(struct napi_struct *napi, int budget)
2592 {
2593         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2594         struct bnxt *bp = bnapi->bp;
2595         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2596         int work_done = 0;
2597
2598         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2599                 napi_complete(napi);
2600                 return 0;
2601         }
2602         while (1) {
2603                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2604
2605                 if (work_done >= budget) {
2606                         if (!budget)
2607                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2608                         break;
2609                 }
2610
2611                 if (!bnxt_has_work(bp, cpr)) {
2612                         if (napi_complete_done(napi, work_done))
2613                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2614                         break;
2615                 }
2616         }
2617         if (bp->flags & BNXT_FLAG_DIM) {
2618                 struct dim_sample dim_sample = {};
2619
2620                 dim_update_sample(cpr->event_ctr,
2621                                   cpr->rx_packets,
2622                                   cpr->rx_bytes,
2623                                   &dim_sample);
2624                 net_dim(&cpr->dim, dim_sample);
2625         }
2626         return work_done;
2627 }
2628
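/* Poll the child completion rings (RX and TX handles) hanging off this
 * NAPI's notification queue, giving each one whatever budget is left
 * over from the previous ring.
 */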
2629 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2630 {
2631         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2632         int i, work_done = 0;
2633
2634         for (i = 0; i < 2; i++) {
2635                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2636
2637                 if (cpr2) {
2638                         work_done += __bnxt_poll_work(bp, cpr2,
2639                                                       budget - work_done);
2640                         cpr->has_more_work |= cpr2->has_more_work;
2641                 }
2642         }
2643         return work_done;
2644 }
2645
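/* Write the completion ring doorbell, using the requested doorbell type
 * (e.g. DBR_TYPE_CQ or DBR_TYPE_CQ_ARMALL), for each child ring that did
 * work since the last doorbell, then let __bnxt_poll_work_done() finish
 * the per-ring work deferred from the poll loop.
 */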
2646 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2647                                  u64 dbr_type)
2648 {
2649         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2650         int i;
2651
2652         for (i = 0; i < 2; i++) {
2653                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2654                 struct bnxt_db_info *db;
2655
2656                 if (cpr2 && cpr2->had_work_done) {
2657                         db = &cpr2->cp_db;
2658                         bnxt_writeq(bp, db->db_key64 | dbr_type |
2659                                     RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2660                         cpr2->had_work_done = 0;
2661                 }
2662         }
2663         __bnxt_poll_work_done(bp, bnapi);
2664 }
2665
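/* NAPI poll handler for P5 chips.  Entries on the notification queue (NQ)
 * identify the child completion ring that has work, so each
 * CQ_NOTIFICATION is dispatched to __bnxt_poll_work() for that ring;
 * other entry types go to bnxt_hwrm_handler().  When the NQ is empty and
 * no child ring has work pending, the CQs are armed, NAPI is completed
 * and the NQ doorbell is re-armed.
 */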
2666 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2667 {
2668         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2669         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2670         struct bnxt_cp_ring_info *cpr_rx;
2671         u32 raw_cons = cpr->cp_raw_cons;
2672         struct bnxt *bp = bnapi->bp;
2673         struct nqe_cn *nqcmp;
2674         int work_done = 0;
2675         u32 cons;
2676
2677         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2678                 napi_complete(napi);
2679                 return 0;
2680         }
2681         if (cpr->has_more_work) {
2682                 cpr->has_more_work = 0;
2683                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2684         }
2685         while (1) {
2686                 cons = RING_CMP(raw_cons);
2687                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2688
2689                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2690                         if (cpr->has_more_work)
2691                                 break;
2692
2693                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2694                         cpr->cp_raw_cons = raw_cons;
2695                         if (napi_complete_done(napi, work_done))
2696                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2697                                                   cpr->cp_raw_cons);
2698                         goto poll_done;
2699                 }
2700
2701                 /* The valid test of the entry must be done before
2702                  * reading any further.
2703                  */
2704                 dma_rmb();
2705
2706                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2707                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2708                         struct bnxt_cp_ring_info *cpr2;
2709
2710                         cpr2 = cpr->cp_ring_arr[idx];
2711                         work_done += __bnxt_poll_work(bp, cpr2,
2712                                                       budget - work_done);
2713                         cpr->has_more_work |= cpr2->has_more_work;
2714                 } else {
2715                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2716                 }
2717                 raw_cons = NEXT_RAW_CMP(raw_cons);
2718         }
2719         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2720         if (raw_cons != cpr->cp_raw_cons) {
2721                 cpr->cp_raw_cons = raw_cons;
2722                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2723         }
2724 poll_done:
2725         cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2726         if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2727                 struct dim_sample dim_sample = {};
2728
2729                 dim_update_sample(cpr->event_ctr,
2730                                   cpr_rx->rx_packets,
2731                                   cpr_rx->rx_bytes,
2732                                   &dim_sample);
2733                 net_dim(&cpr->dim, dim_sample);
2734         }
2735         return work_done;
2736 }
2737
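/* Unmap and free every pending TX buffer on all TX rings: XDP_REDIRECT
 * frames, push-mode skbs, and normal skbs with their fragments.  Each
 * TX queue's byte queue accounting is also reset.
 */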
2738 static void bnxt_free_tx_skbs(struct bnxt *bp)
2739 {
2740         int i, max_idx;
2741         struct pci_dev *pdev = bp->pdev;
2742
2743         if (!bp->tx_ring)
2744                 return;
2745
2746         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2747         for (i = 0; i < bp->tx_nr_rings; i++) {
2748                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2749                 int j;
2750
2751                 if (!txr->tx_buf_ring)
2752                         continue;
2753
2754                 for (j = 0; j < max_idx;) {
2755                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2756                         struct sk_buff *skb;
2757                         int k, last;
2758
2759                         if (i < bp->tx_nr_rings_xdp &&
2760                             tx_buf->action == XDP_REDIRECT) {
2761                                 dma_unmap_single(&pdev->dev,
2762                                         dma_unmap_addr(tx_buf, mapping),
2763                                         dma_unmap_len(tx_buf, len),
2764                                         DMA_TO_DEVICE);
2765                                 xdp_return_frame(tx_buf->xdpf);
2766                                 tx_buf->action = 0;
2767                                 tx_buf->xdpf = NULL;
2768                                 j++;
2769                                 continue;
2770                         }
2771
2772                         skb = tx_buf->skb;
2773                         if (!skb) {
2774                                 j++;
2775                                 continue;
2776                         }
2777
2778                         tx_buf->skb = NULL;
2779
2780                         if (tx_buf->is_push) {
2781                                 dev_kfree_skb(skb);
2782                                 j += 2;
2783                                 continue;
2784                         }
2785
2786                         dma_unmap_single(&pdev->dev,
2787                                          dma_unmap_addr(tx_buf, mapping),
2788                                          skb_headlen(skb),
2789                                          DMA_TO_DEVICE);
2790
2791                         last = tx_buf->nr_frags;
2792                         j += 2;
2793                         for (k = 0; k < last; k++, j++) {
2794                                 int ring_idx = j & bp->tx_ring_mask;
2795                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2796
2797                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2798                                 dma_unmap_page(
2799                                         &pdev->dev,
2800                                         dma_unmap_addr(tx_buf, mapping),
2801                                         skb_frag_size(frag), DMA_TO_DEVICE);
2802                         }
2803                         dev_kfree_skb(skb);
2804                 }
2805                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2806         }
2807 }
2808
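/* Free the software RX buffers of one ring: TPA staging buffers, regular
 * RX buffers (page-pool pages in XDP page mode, frags otherwise) and
 * aggregation pages, clearing the aggregation bitmap and TPA index map
 * along the way.
 */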
2809 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2810 {
2811         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2812         struct pci_dev *pdev = bp->pdev;
2813         struct bnxt_tpa_idx_map *map;
2814         int i, max_idx, max_agg_idx;
2815
2816         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2817         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2818         if (!rxr->rx_tpa)
2819                 goto skip_rx_tpa_free;
2820
2821         for (i = 0; i < bp->max_tpa; i++) {
2822                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2823                 u8 *data = tpa_info->data;
2824
2825                 if (!data)
2826                         continue;
2827
2828                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2829                                        bp->rx_buf_use_size, bp->rx_dir,
2830                                        DMA_ATTR_WEAK_ORDERING);
2831
2832                 tpa_info->data = NULL;
2833
2834                 skb_free_frag(data);
2835         }
2836
2837 skip_rx_tpa_free:
2838         if (!rxr->rx_buf_ring)
2839                 goto skip_rx_buf_free;
2840
2841         for (i = 0; i < max_idx; i++) {
2842                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2843                 dma_addr_t mapping = rx_buf->mapping;
2844                 void *data = rx_buf->data;
2845
2846                 if (!data)
2847                         continue;
2848
2849                 rx_buf->data = NULL;
2850                 if (BNXT_RX_PAGE_MODE(bp)) {
2851                         mapping -= bp->rx_dma_offset;
2852                         dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2853                                              bp->rx_dir,
2854                                              DMA_ATTR_WEAK_ORDERING);
2855                         page_pool_recycle_direct(rxr->page_pool, data);
2856                 } else {
2857                         dma_unmap_single_attrs(&pdev->dev, mapping,
2858                                                bp->rx_buf_use_size, bp->rx_dir,
2859                                                DMA_ATTR_WEAK_ORDERING);
2860                         skb_free_frag(data);
2861                 }
2862         }
2863
2864 skip_rx_buf_free:
2865         if (!rxr->rx_agg_ring)
2866                 goto skip_rx_agg_free;
2867
2868         for (i = 0; i < max_agg_idx; i++) {
2869                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2870                 struct page *page = rx_agg_buf->page;
2871
2872                 if (!page)
2873                         continue;
2874
2875                 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2876                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2877                                      DMA_ATTR_WEAK_ORDERING);
2878
2879                 rx_agg_buf->page = NULL;
2880                 __clear_bit(i, rxr->rx_agg_bmap);
2881
2882                 __free_page(page);
2883         }
2884
2885 skip_rx_agg_free:
2886         if (rxr->rx_page) {
2887                 __free_page(rxr->rx_page);
2888                 rxr->rx_page = NULL;
2889         }
2890         map = rxr->rx_tpa_idx_map;
2891         if (map)
2892                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2893 }
2894
2895 static void bnxt_free_rx_skbs(struct bnxt *bp)
2896 {
2897         int i;
2898
2899         if (!bp->rx_ring)
2900                 return;
2901
2902         for (i = 0; i < bp->rx_nr_rings; i++)
2903                 bnxt_free_one_rx_ring_skbs(bp, i);
2904 }
2905
2906 static void bnxt_free_skbs(struct bnxt *bp)
2907 {
2908         bnxt_free_tx_skbs(bp);
2909         bnxt_free_rx_skbs(bp);
2910 }
2911
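/* Initialize a block of firmware context memory: fill the whole block
 * with init_val when no offset is given, otherwise write init_val only
 * at the given offset of every fixed-size record in the block.
 */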
2912 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2913 {
2914         u8 init_val = mem_init->init_val;
2915         u16 offset = mem_init->offset;
2916         u8 *p2 = p;
2917         int i;
2918
2919         if (!init_val)
2920                 return;
2921         if (offset == BNXT_MEM_INVALID_OFFSET) {
2922                 memset(p, init_val, len);
2923                 return;
2924         }
2925         for (i = 0; i < len; i += mem_init->size)
2926                 *(p2 + i + offset) = init_val;
2927 }
2928
2929 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2930 {
2931         struct pci_dev *pdev = bp->pdev;
2932         int i;
2933
2934         if (!rmem->pg_arr)
2935                 goto skip_pages;
2936
2937         for (i = 0; i < rmem->nr_pages; i++) {
2938                 if (!rmem->pg_arr[i])
2939                         continue;
2940
2941                 dma_free_coherent(&pdev->dev, rmem->page_size,
2942                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2943
2944                 rmem->pg_arr[i] = NULL;
2945         }
2946 skip_pages:
2947         if (rmem->pg_tbl) {
2948                 size_t pg_tbl_size = rmem->nr_pages * 8;
2949
2950                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2951                         pg_tbl_size = rmem->page_size;
2952                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2953                                   rmem->pg_tbl, rmem->pg_tbl_map);
2954                 rmem->pg_tbl = NULL;
2955         }
2956         if (rmem->vmem_size && *rmem->vmem) {
2957                 vfree(*rmem->vmem);
2958                 *rmem->vmem = NULL;
2959         }
2960 }
2961
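/* Allocate the DMA-coherent pages backing the ring described by rmem.
 * Multi-page or indirect rings also get a page table filled with the
 * page addresses, tagged with the PTE valid / next-to-last / last bits
 * when requested, and optional software context is allocated via
 * vzalloc().
 */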
2962 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2963 {
2964         struct pci_dev *pdev = bp->pdev;
2965         u64 valid_bit = 0;
2966         int i;
2967
2968         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2969                 valid_bit = PTU_PTE_VALID;
2970         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2971                 size_t pg_tbl_size = rmem->nr_pages * 8;
2972
2973                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2974                         pg_tbl_size = rmem->page_size;
2975                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2976                                                   &rmem->pg_tbl_map,
2977                                                   GFP_KERNEL);
2978                 if (!rmem->pg_tbl)
2979                         return -ENOMEM;
2980         }
2981
2982         for (i = 0; i < rmem->nr_pages; i++) {
2983                 u64 extra_bits = valid_bit;
2984
2985                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2986                                                      rmem->page_size,
2987                                                      &rmem->dma_arr[i],
2988                                                      GFP_KERNEL);
2989                 if (!rmem->pg_arr[i])
2990                         return -ENOMEM;
2991
2992                 if (rmem->mem_init)
2993                         bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2994                                           rmem->page_size);
2995                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2996                         if (i == rmem->nr_pages - 2 &&
2997                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2998                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2999                         else if (i == rmem->nr_pages - 1 &&
3000                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3001                                 extra_bits |= PTU_PTE_LAST;
3002                         rmem->pg_tbl[i] =
3003                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3004                 }
3005         }
3006
3007         if (rmem->vmem_size) {
3008                 *rmem->vmem = vzalloc(rmem->vmem_size);
3009                 if (!(*rmem->vmem))
3010                         return -ENOMEM;
3011         }
3012         return 0;
3013 }
3014
3015 static void bnxt_free_tpa_info(struct bnxt *bp)
3016 {
3017         int i;
3018
3019         for (i = 0; i < bp->rx_nr_rings; i++) {
3020                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3021
3022                 kfree(rxr->rx_tpa_idx_map);
3023                 rxr->rx_tpa_idx_map = NULL;
3024                 if (rxr->rx_tpa) {
3025                         kfree(rxr->rx_tpa[0].agg_arr);
3026                         rxr->rx_tpa[0].agg_arr = NULL;
3027                 }
3028                 kfree(rxr->rx_tpa);
3029                 rxr->rx_tpa = NULL;
3030         }
3031 }
3032
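/* Allocate per-ring TPA (hardware LRO/GRO aggregation) state.  On P5
 * chips the TPA slots additionally share one aggregation-completion
 * array (max_tpa * MAX_SKB_FRAGS entries) and an agg index map per RX
 * ring.
 */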
3033 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3034 {
3035         int i, j, total_aggs = 0;
3036
3037         bp->max_tpa = MAX_TPA;
3038         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3039                 if (!bp->max_tpa_v2)
3040                         return 0;
3041                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3042                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3043         }
3044
3045         for (i = 0; i < bp->rx_nr_rings; i++) {
3046                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3047                 struct rx_agg_cmp *agg;
3048
3049                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3050                                       GFP_KERNEL);
3051                 if (!rxr->rx_tpa)
3052                         return -ENOMEM;
3053
3054                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3055                         continue;
3056                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3057                 rxr->rx_tpa[0].agg_arr = agg;
3058                 if (!agg)
3059                         return -ENOMEM;
3060                 for (j = 1; j < bp->max_tpa; j++)
3061                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3062                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3063                                               GFP_KERNEL);
3064                 if (!rxr->rx_tpa_idx_map)
3065                         return -ENOMEM;
3066         }
3067         return 0;
3068 }
3069
3070 static void bnxt_free_rx_rings(struct bnxt *bp)
3071 {
3072         int i;
3073
3074         if (!bp->rx_ring)
3075                 return;
3076
3077         bnxt_free_tpa_info(bp);
3078         for (i = 0; i < bp->rx_nr_rings; i++) {
3079                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3080                 struct bnxt_ring_struct *ring;
3081
3082                 if (rxr->xdp_prog)
3083                         bpf_prog_put(rxr->xdp_prog);
3084
3085                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3086                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3087
3088                 page_pool_destroy(rxr->page_pool);
3089                 rxr->page_pool = NULL;
3090
3091                 kfree(rxr->rx_agg_bmap);
3092                 rxr->rx_agg_bmap = NULL;
3093
3094                 ring = &rxr->rx_ring_struct;
3095                 bnxt_free_ring(bp, &ring->ring_mem);
3096
3097                 ring = &rxr->rx_agg_ring_struct;
3098                 bnxt_free_ring(bp, &ring->ring_mem);
3099         }
3100 }
3101
3102 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3103                                    struct bnxt_rx_ring_info *rxr)
3104 {
3105         struct page_pool_params pp = { 0 };
3106
3107         pp.pool_size = bp->rx_ring_size;
3108         pp.nid = dev_to_node(&bp->pdev->dev);
3109         pp.dev = &bp->pdev->dev;
3110         pp.dma_dir = DMA_BIDIRECTIONAL;
3111
3112         rxr->page_pool = page_pool_create(&pp);
3113         if (IS_ERR(rxr->page_pool)) {
3114                 int err = PTR_ERR(rxr->page_pool);
3115
3116                 rxr->page_pool = NULL;
3117                 return err;
3118         }
3119         return 0;
3120 }
3121
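/* Allocate all RX rings: a page pool and XDP rxq registration per ring,
 * the RX and (when aggregation is enabled) aggregation ring memory plus
 * the aggregation bitmap, and the TPA state when TPA is enabled.
 */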
3122 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3123 {
3124         int i, rc = 0, agg_rings = 0;
3125
3126         if (!bp->rx_ring)
3127                 return -ENOMEM;
3128
3129         if (bp->flags & BNXT_FLAG_AGG_RINGS)
3130                 agg_rings = 1;
3131
3132         for (i = 0; i < bp->rx_nr_rings; i++) {
3133                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3134                 struct bnxt_ring_struct *ring;
3135
3136                 ring = &rxr->rx_ring_struct;
3137
3138                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3139                 if (rc)
3140                         return rc;
3141
3142                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3143                 if (rc < 0)
3144                         return rc;
3145
3146                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3147                                                 MEM_TYPE_PAGE_POOL,
3148                                                 rxr->page_pool);
3149                 if (rc) {
3150                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3151                         return rc;
3152                 }
3153
3154                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3155                 if (rc)
3156                         return rc;
3157
3158                 ring->grp_idx = i;
3159                 if (agg_rings) {
3160                         u16 mem_size;
3161
3162                         ring = &rxr->rx_agg_ring_struct;
3163                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3164                         if (rc)
3165                                 return rc;
3166
3167                         ring->grp_idx = i;
3168                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3169                         mem_size = rxr->rx_agg_bmap_size / 8;
3170                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3171                         if (!rxr->rx_agg_bmap)
3172                                 return -ENOMEM;
3173                 }
3174         }
3175         if (bp->flags & BNXT_FLAG_TPA)
3176                 rc = bnxt_alloc_tpa_info(bp);
3177         return rc;
3178 }
3179
3180 static void bnxt_free_tx_rings(struct bnxt *bp)
3181 {
3182         int i;
3183         struct pci_dev *pdev = bp->pdev;
3184
3185         if (!bp->tx_ring)
3186                 return;
3187
3188         for (i = 0; i < bp->tx_nr_rings; i++) {
3189                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3190                 struct bnxt_ring_struct *ring;
3191
3192                 if (txr->tx_push) {
3193                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3194                                           txr->tx_push, txr->tx_push_mapping);
3195                         txr->tx_push = NULL;
3196                 }
3197
3198                 ring = &txr->tx_ring_struct;
3199
3200                 bnxt_free_ring(bp, &ring->ring_mem);
3201         }
3202 }
3203
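/* Allocate all TX rings.  TX push mode is kept only if the push buffer
 * (push BD plus tx_push_thresh bytes, cache aligned) fits in 256 bytes;
 * in that case a coherent staging buffer is allocated per ring.  Each
 * ring is also assigned the hardware queue id of its traffic class.
 */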
3204 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3205 {
3206         int i, j, rc;
3207         struct pci_dev *pdev = bp->pdev;
3208
3209         bp->tx_push_size = 0;
3210         if (bp->tx_push_thresh) {
3211                 int push_size;
3212
3213                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3214                                         bp->tx_push_thresh);
3215
3216                 if (push_size > 256) {
3217                         push_size = 0;
3218                         bp->tx_push_thresh = 0;
3219                 }
3220
3221                 bp->tx_push_size = push_size;
3222         }
3223
3224         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3225                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3226                 struct bnxt_ring_struct *ring;
3227                 u8 qidx;
3228
3229                 ring = &txr->tx_ring_struct;
3230
3231                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3232                 if (rc)
3233                         return rc;
3234
3235                 ring->grp_idx = txr->bnapi->index;
3236                 if (bp->tx_push_size) {
3237                         dma_addr_t mapping;
3238
3239                         /* One pre-allocated DMA buffer to back up
3240                          * the TX push operation
3241                          */
3242                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3243                                                 bp->tx_push_size,
3244                                                 &txr->tx_push_mapping,
3245                                                 GFP_KERNEL);
3246
3247                         if (!txr->tx_push)
3248                                 return -ENOMEM;
3249
3250                         mapping = txr->tx_push_mapping +
3251                                 sizeof(struct tx_push_bd);
3252                         txr->data_mapping = cpu_to_le64(mapping);
3253                 }
3254                 qidx = bp->tc_to_qidx[j];
3255                 ring->queue_id = bp->q_info[qidx].queue_id;
3256                 if (i < bp->tx_nr_rings_xdp)
3257                         continue;
3258                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3259                         j++;
3260         }
3261         return 0;
3262 }
3263
3264 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3265 {
3266         struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3267
3268         kfree(cpr->cp_desc_ring);
3269         cpr->cp_desc_ring = NULL;
3270         ring->ring_mem.pg_arr = NULL;
3271         kfree(cpr->cp_desc_mapping);
3272         cpr->cp_desc_mapping = NULL;
3273         ring->ring_mem.dma_arr = NULL;
3274 }
3275
3276 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3277 {
3278         cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3279         if (!cpr->cp_desc_ring)
3280                 return -ENOMEM;
3281         cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3282                                        GFP_KERNEL);
3283         if (!cpr->cp_desc_mapping)
3284                 return -ENOMEM;
3285         return 0;
3286 }
3287
3288 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3289 {
3290         int i;
3291
3292         if (!bp->bnapi)
3293                 return;
3294         for (i = 0; i < bp->cp_nr_rings; i++) {
3295                 struct bnxt_napi *bnapi = bp->bnapi[i];
3296
3297                 if (!bnapi)
3298                         continue;
3299                 bnxt_free_cp_arrays(&bnapi->cp_ring);
3300         }
3301 }
3302
3303 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3304 {
3305         int i, n = bp->cp_nr_pages;
3306
3307         for (i = 0; i < bp->cp_nr_rings; i++) {
3308                 struct bnxt_napi *bnapi = bp->bnapi[i];
3309                 int rc;
3310
3311                 if (!bnapi)
3312                         continue;
3313                 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3314                 if (rc)
3315                         return rc;
3316         }
3317         return 0;
3318 }
3319
3320 static void bnxt_free_cp_rings(struct bnxt *bp)
3321 {
3322         int i;
3323
3324         if (!bp->bnapi)
3325                 return;
3326
3327         for (i = 0; i < bp->cp_nr_rings; i++) {
3328                 struct bnxt_napi *bnapi = bp->bnapi[i];
3329                 struct bnxt_cp_ring_info *cpr;
3330                 struct bnxt_ring_struct *ring;
3331                 int j;
3332
3333                 if (!bnapi)
3334                         continue;
3335
3336                 cpr = &bnapi->cp_ring;
3337                 ring = &cpr->cp_ring_struct;
3338
3339                 bnxt_free_ring(bp, &ring->ring_mem);
3340
3341                 for (j = 0; j < 2; j++) {
3342                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3343
3344                         if (cpr2) {
3345                                 ring = &cpr2->cp_ring_struct;
3346                                 bnxt_free_ring(bp, &ring->ring_mem);
3347                                 bnxt_free_cp_arrays(cpr2);
3348                                 kfree(cpr2);
3349                                 cpr->cp_ring_arr[j] = NULL;
3350                         }
3351                 }
3352         }
3353 }
3354
3355 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3356 {
3357         struct bnxt_ring_mem_info *rmem;
3358         struct bnxt_ring_struct *ring;
3359         struct bnxt_cp_ring_info *cpr;
3360         int rc;
3361
3362         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3363         if (!cpr)
3364                 return NULL;
3365
3366         rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3367         if (rc) {
3368                 bnxt_free_cp_arrays(cpr);
3369                 kfree(cpr);
3370                 return NULL;
3371         }
3372         ring = &cpr->cp_ring_struct;
3373         rmem = &ring->ring_mem;
3374         rmem->nr_pages = bp->cp_nr_pages;
3375         rmem->page_size = HW_CMPD_RING_SIZE;
3376         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3377         rmem->dma_arr = cpr->cp_desc_mapping;
3378         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3379         rc = bnxt_alloc_ring(bp, rmem);
3380         if (rc) {
3381                 bnxt_free_ring(bp, rmem);
3382                 bnxt_free_cp_arrays(cpr);
3383                 kfree(cpr);
3384                 cpr = NULL;
3385         }
3386         return cpr;
3387 }
3388
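/* Allocate the main completion ring of each NAPI instance (the NQ on P5
 * chips) and set its MSI-X map index, adjusted for vectors reserved by
 * the ULP driver.  On P5 chips, child RX and TX completion rings are
 * then allocated into cp_ring_arr[BNXT_RX_HDL] and [BNXT_TX_HDL].
 */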
3389 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3390 {
3391         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3392         int i, rc, ulp_base_vec, ulp_msix;
3393
3394         ulp_msix = bnxt_get_ulp_msix_num(bp);
3395         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3396         for (i = 0; i < bp->cp_nr_rings; i++) {
3397                 struct bnxt_napi *bnapi = bp->bnapi[i];
3398                 struct bnxt_cp_ring_info *cpr;
3399                 struct bnxt_ring_struct *ring;
3400
3401                 if (!bnapi)
3402                         continue;
3403
3404                 cpr = &bnapi->cp_ring;
3405                 cpr->bnapi = bnapi;
3406                 ring = &cpr->cp_ring_struct;
3407
3408                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3409                 if (rc)
3410                         return rc;
3411
3412                 if (ulp_msix && i >= ulp_base_vec)
3413                         ring->map_idx = i + ulp_msix;
3414                 else
3415                         ring->map_idx = i;
3416
3417                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3418                         continue;
3419
3420                 if (i < bp->rx_nr_rings) {
3421                         struct bnxt_cp_ring_info *cpr2 =
3422                                 bnxt_alloc_cp_sub_ring(bp);
3423
3424                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3425                         if (!cpr2)
3426                                 return -ENOMEM;
3427                         cpr2->bnapi = bnapi;
3428                 }
3429                 if ((sh && i < bp->tx_nr_rings) ||
3430                     (!sh && i >= bp->rx_nr_rings)) {
3431                         struct bnxt_cp_ring_info *cpr2 =
3432                                 bnxt_alloc_cp_sub_ring(bp);
3433
3434                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3435                         if (!cpr2)
3436                                 return -ENOMEM;
3437                         cpr2->bnapi = bnapi;
3438                 }
3439         }
3440         return 0;
3441 }
3442
3443 static void bnxt_init_ring_struct(struct bnxt *bp)
3444 {
3445         int i;
3446
3447         for (i = 0; i < bp->cp_nr_rings; i++) {
3448                 struct bnxt_napi *bnapi = bp->bnapi[i];
3449                 struct bnxt_ring_mem_info *rmem;
3450                 struct bnxt_cp_ring_info *cpr;
3451                 struct bnxt_rx_ring_info *rxr;
3452                 struct bnxt_tx_ring_info *txr;
3453                 struct bnxt_ring_struct *ring;
3454
3455                 if (!bnapi)
3456                         continue;
3457
3458                 cpr = &bnapi->cp_ring;
3459                 ring = &cpr->cp_ring_struct;
3460                 rmem = &ring->ring_mem;
3461                 rmem->nr_pages = bp->cp_nr_pages;
3462                 rmem->page_size = HW_CMPD_RING_SIZE;
3463                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3464                 rmem->dma_arr = cpr->cp_desc_mapping;
3465                 rmem->vmem_size = 0;
3466
3467                 rxr = bnapi->rx_ring;
3468                 if (!rxr)
3469                         goto skip_rx;
3470
3471                 ring = &rxr->rx_ring_struct;
3472                 rmem = &ring->ring_mem;
3473                 rmem->nr_pages = bp->rx_nr_pages;
3474                 rmem->page_size = HW_RXBD_RING_SIZE;
3475                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3476                 rmem->dma_arr = rxr->rx_desc_mapping;
3477                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3478                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3479
3480                 ring = &rxr->rx_agg_ring_struct;
3481                 rmem = &ring->ring_mem;
3482                 rmem->nr_pages = bp->rx_agg_nr_pages;
3483                 rmem->page_size = HW_RXBD_RING_SIZE;
3484                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3485                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3486                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3487                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3488
3489 skip_rx:
3490                 txr = bnapi->tx_ring;
3491                 if (!txr)
3492                         continue;
3493
3494                 ring = &txr->tx_ring_struct;
3495                 rmem = &ring->ring_mem;
3496                 rmem->nr_pages = bp->tx_nr_pages;
3497                 rmem->page_size = HW_RXBD_RING_SIZE;
3498                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3499                 rmem->dma_arr = txr->tx_desc_mapping;
3500                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3501                 rmem->vmem = (void **)&txr->tx_buf_ring;
3502         }
3503 }
3504
3505 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3506 {
3507         int i;
3508         u32 prod;
3509         struct rx_bd **rx_buf_ring;
3510
3511         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3512         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3513                 int j;
3514                 struct rx_bd *rxbd;
3515
3516                 rxbd = rx_buf_ring[i];
3517                 if (!rxbd)
3518                         continue;
3519
3520                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3521                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3522                         rxbd->rx_bd_opaque = prod;
3523                 }
3524         }
3525 }
3526
3527 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3528 {
3529         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3530         struct net_device *dev = bp->dev;
3531         u32 prod;
3532         int i;
3533
3534         prod = rxr->rx_prod;
3535         for (i = 0; i < bp->rx_ring_size; i++) {
3536                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3537                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3538                                     ring_nr, i, bp->rx_ring_size);
3539                         break;
3540                 }
3541                 prod = NEXT_RX(prod);
3542         }
3543         rxr->rx_prod = prod;
3544
3545         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3546                 return 0;
3547
3548         prod = rxr->rx_agg_prod;
3549         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3550                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3551                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3552                                     ring_nr, i, bp->rx_agg_ring_size);
3553                         break;
3554                 }
3555                 prod = NEXT_RX_AGG(prod);
3556         }
3557         rxr->rx_agg_prod = prod;
3558
3559         if (rxr->rx_tpa) {
3560                 dma_addr_t mapping;
3561                 u8 *data;
3562
3563                 for (i = 0; i < bp->max_tpa; i++) {
3564                         data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
3565                         if (!data)
3566                                 return -ENOMEM;
3567
3568                         rxr->rx_tpa[i].data = data;
3569                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3570                         rxr->rx_tpa[i].mapping = mapping;
3571                 }
3572         }
3573         return 0;
3574 }
3575
3576 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3577 {
3578         struct bnxt_rx_ring_info *rxr;
3579         struct bnxt_ring_struct *ring;
3580         u32 type;
3581
3582         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3583                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3584
3585         if (NET_IP_ALIGN == 2)
3586                 type |= RX_BD_FLAGS_SOP;
3587
3588         rxr = &bp->rx_ring[ring_nr];
3589         ring = &rxr->rx_ring_struct;
3590         bnxt_init_rxbd_pages(ring, type);
3591
3592         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3593                 bpf_prog_add(bp->xdp_prog, 1);
3594                 rxr->xdp_prog = bp->xdp_prog;
3595         }
3596         ring->fw_ring_id = INVALID_HW_RING_ID;
3597
3598         ring = &rxr->rx_agg_ring_struct;
3599         ring->fw_ring_id = INVALID_HW_RING_ID;
3600
3601         if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3602                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3603                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3604
3605                 bnxt_init_rxbd_pages(ring, type);
3606         }
3607
3608         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3609 }
3610
3611 static void bnxt_init_cp_rings(struct bnxt *bp)
3612 {
3613         int i, j;
3614
3615         for (i = 0; i < bp->cp_nr_rings; i++) {
3616                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3617                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3618
3619                 ring->fw_ring_id = INVALID_HW_RING_ID;
3620                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3621                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3622                 for (j = 0; j < 2; j++) {
3623                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3624
3625                         if (!cpr2)
3626                                 continue;
3627
3628                         ring = &cpr2->cp_ring_struct;
3629                         ring->fw_ring_id = INVALID_HW_RING_ID;
3630                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3631                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3632                 }
3633         }
3634 }
3635
3636 static int bnxt_init_rx_rings(struct bnxt *bp)
3637 {
3638         int i, rc = 0;
3639
3640         if (BNXT_RX_PAGE_MODE(bp)) {
3641                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3642                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3643         } else {
3644                 bp->rx_offset = BNXT_RX_OFFSET;
3645                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3646         }
3647
3648         for (i = 0; i < bp->rx_nr_rings; i++) {
3649                 rc = bnxt_init_one_rx_ring(bp, i);
3650                 if (rc)
3651                         break;
3652         }
3653
3654         return rc;
3655 }
3656
3657 static int bnxt_init_tx_rings(struct bnxt *bp)
3658 {
3659         u16 i;
3660
3661         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3662                                    BNXT_MIN_TX_DESC_CNT);
3663
3664         for (i = 0; i < bp->tx_nr_rings; i++) {
3665                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3666                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3667
3668                 ring->fw_ring_id = INVALID_HW_RING_ID;
3669         }
3670
3671         return 0;
3672 }
3673
3674 static void bnxt_free_ring_grps(struct bnxt *bp)
3675 {
3676         kfree(bp->grp_info);
3677         bp->grp_info = NULL;
3678 }
3679
3680 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3681 {
3682         int i;
3683
3684         if (irq_re_init) {
3685                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3686                                        sizeof(struct bnxt_ring_grp_info),
3687                                        GFP_KERNEL);
3688                 if (!bp->grp_info)
3689                         return -ENOMEM;
3690         }
3691         for (i = 0; i < bp->cp_nr_rings; i++) {
3692                 if (irq_re_init)
3693                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3694                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3695                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3696                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3697                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3698         }
3699         return 0;
3700 }
3701
3702 static void bnxt_free_vnics(struct bnxt *bp)
3703 {
3704         kfree(bp->vnic_info);
3705         bp->vnic_info = NULL;
3706         bp->nr_vnics = 0;
3707 }
3708
3709 static int bnxt_alloc_vnics(struct bnxt *bp)
3710 {
3711         int num_vnics = 1;
3712
3713 #ifdef CONFIG_RFS_ACCEL
3714         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3715                 num_vnics += bp->rx_nr_rings;
3716 #endif
3717
3718         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3719                 num_vnics++;
3720
3721         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3722                                 GFP_KERNEL);
3723         if (!bp->vnic_info)
3724                 return -ENOMEM;
3725
3726         bp->nr_vnics = num_vnics;
3727         return 0;
3728 }
3729
3730 static void bnxt_init_vnics(struct bnxt *bp)
3731 {
3732         int i;
3733
3734         for (i = 0; i < bp->nr_vnics; i++) {
3735                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3736                 int j;
3737
3738                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3739                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3740                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3741
3742                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3743
3744                 if (bp->vnic_info[i].rss_hash_key) {
3745                         if (i == 0)
3746                                 prandom_bytes(vnic->rss_hash_key,
3747                                               HW_HASH_KEY_SIZE);
3748                         else
3749                                 memcpy(vnic->rss_hash_key,
3750                                        bp->vnic_info[0].rss_hash_key,
3751                                        HW_HASH_KEY_SIZE);
3752                 }
3753         }
3754 }
3755
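/* Return the number of ring pages needed for ring_size descriptors,
 * rounded up to a power of two.  Ring sizes in this driver are normally
 * of the form 2^n - 1, so the pages++ before the rounding loop covers
 * the remainder, e.g. 2047 descriptors at 512 per page -> 3 + 1 -> 4
 * pages.
 */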
3756 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3757 {
3758         int pages;
3759
3760         pages = ring_size / desc_per_pg;
3761
3762         if (!pages)
3763                 return 1;
3764
3765         pages++;
3766
3767         while (pages & (pages - 1))
3768                 pages++;
3769
3770         return pages;
3771 }
3772
3773 void bnxt_set_tpa_flags(struct bnxt *bp)
3774 {
3775         bp->flags &= ~BNXT_FLAG_TPA;
3776         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3777                 return;
3778         if (bp->dev->features & NETIF_F_LRO)
3779                 bp->flags |= BNXT_FLAG_LRO;
3780         else if (bp->dev->features & NETIF_F_GRO_HW)
3781                 bp->flags |= BNXT_FLAG_GRO;
3782 }
3783
3784 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3785  * be set on entry.
3786  */
3787 void bnxt_set_ring_params(struct bnxt *bp)
3788 {
3789         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3790         u32 agg_factor = 0, agg_ring_size = 0;
3791
3792         /* 8 for CRC and VLAN */
3793         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3794
3795         rx_space = rx_size + NET_SKB_PAD +
3796                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3797
3798         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3799         ring_size = bp->rx_ring_size;
3800         bp->rx_agg_ring_size = 0;
3801         bp->rx_agg_nr_pages = 0;
3802
3803         if (bp->flags & BNXT_FLAG_TPA)
3804                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3805
3806         bp->flags &= ~BNXT_FLAG_JUMBO;
3807         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3808                 u32 jumbo_factor;
3809
3810                 bp->flags |= BNXT_FLAG_JUMBO;
3811                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3812                 if (jumbo_factor > agg_factor)
3813                         agg_factor = jumbo_factor;
3814         }
3815         if (agg_factor) {
3816                 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3817                         ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3818                         netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3819                                     bp->rx_ring_size, ring_size);
3820                         bp->rx_ring_size = ring_size;
3821                 }
3822                 agg_ring_size = ring_size * agg_factor;
3823
3824                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3825                                                         RX_DESC_CNT);
3826                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3827                         u32 tmp = agg_ring_size;
3828
3829                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3830                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3831                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3832                                     tmp, agg_ring_size);
3833                 }
3834                 bp->rx_agg_ring_size = agg_ring_size;
3835                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3836                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3837                 rx_space = rx_size + NET_SKB_PAD +
3838                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3839         }
3840
3841         bp->rx_buf_use_size = rx_size;
3842         bp->rx_buf_size = rx_space;
3843
3844         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3845         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3846
3847         ring_size = bp->tx_ring_size;
3848         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3849         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3850
3851         max_rx_cmpl = bp->rx_ring_size;
3852         /* MAX TPA needs to be added because TPA_START completions are
3853          * immediately recycled, so the TPA completions are not bound by
3854          * the RX ring size.
3855          */
3856         if (bp->flags & BNXT_FLAG_TPA)
3857                 max_rx_cmpl += bp->max_tpa;
3858         /* RX and TPA completions are 32-byte, all others are 16-byte */
3859         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3860         bp->cp_ring_size = ring_size;
3861
3862         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3863         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3864                 bp->cp_nr_pages = MAX_CP_PAGES;
3865                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3866                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3867                             ring_size, bp->cp_ring_size);
3868         }
3869         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3870         bp->cp_ring_mask = bp->cp_bit - 1;
3871 }
3872
3873 /* Changing allocation mode of RX rings.
3874  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3875  */
3876 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3877 {
3878         if (page_mode) {
3879                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3880                         return -EOPNOTSUPP;
3881                 bp->dev->max_mtu =
3882                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3883                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3884                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3885                 bp->rx_dir = DMA_BIDIRECTIONAL;
3886                 bp->rx_skb_func = bnxt_rx_page_skb;
3887                 /* Disable LRO or GRO_HW */
3888                 netdev_update_features(bp->dev);
3889         } else {
3890                 bp->dev->max_mtu = bp->max_mtu;
3891                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3892                 bp->rx_dir = DMA_FROM_DEVICE;
3893                 bp->rx_skb_func = bnxt_rx_skb;
3894         }
3895         return 0;
3896 }
3897
3898 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3899 {
3900         int i;
3901         struct bnxt_vnic_info *vnic;
3902         struct pci_dev *pdev = bp->pdev;
3903
3904         if (!bp->vnic_info)
3905                 return;
3906
3907         for (i = 0; i < bp->nr_vnics; i++) {
3908                 vnic = &bp->vnic_info[i];
3909
3910                 kfree(vnic->fw_grp_ids);
3911                 vnic->fw_grp_ids = NULL;
3912
3913                 kfree(vnic->uc_list);
3914                 vnic->uc_list = NULL;
3915
3916                 if (vnic->mc_list) {
3917                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3918                                           vnic->mc_list, vnic->mc_list_mapping);
3919                         vnic->mc_list = NULL;
3920                 }
3921
3922                 if (vnic->rss_table) {
3923                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3924                                           vnic->rss_table,
3925                                           vnic->rss_table_dma_addr);
3926                         vnic->rss_table = NULL;
3927                 }
3928
3929                 vnic->rss_hash_key = NULL;
3930                 vnic->flags = 0;
3931         }
3932 }
3933
3934 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3935 {
3936         int i, rc = 0, size;
3937         struct bnxt_vnic_info *vnic;
3938         struct pci_dev *pdev = bp->pdev;
3939         int max_rings;
3940
3941         for (i = 0; i < bp->nr_vnics; i++) {
3942                 vnic = &bp->vnic_info[i];
3943
3944                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3945                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3946
3947                         if (mem_size > 0) {
3948                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3949                                 if (!vnic->uc_list) {
3950                                         rc = -ENOMEM;
3951                                         goto out;
3952                                 }
3953                         }
3954                 }
3955
3956                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3957                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3958                         vnic->mc_list =
3959                                 dma_alloc_coherent(&pdev->dev,
3960                                                    vnic->mc_list_size,
3961                                                    &vnic->mc_list_mapping,
3962                                                    GFP_KERNEL);
3963                         if (!vnic->mc_list) {
3964                                 rc = -ENOMEM;
3965                                 goto out;
3966                         }
3967                 }
3968
3969                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3970                         goto vnic_skip_grps;
3971
3972                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3973                         max_rings = bp->rx_nr_rings;
3974                 else
3975                         max_rings = 1;
3976
3977                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3978                 if (!vnic->fw_grp_ids) {
3979                         rc = -ENOMEM;
3980                         goto out;
3981                 }
3982 vnic_skip_grps:
3983                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3984                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3985                         continue;
3986
3987                 /* Allocate rss table and hash key */
3988                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3989                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3990                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3991
3992                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3993                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3994                                                      vnic->rss_table_size,
3995                                                      &vnic->rss_table_dma_addr,
3996                                                      GFP_KERNEL);
3997                 if (!vnic->rss_table) {
3998                         rc = -ENOMEM;
3999                         goto out;
4000                 }
4001
4002                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4003                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4004         }
4005         return 0;
4006
4007 out:
4008         return rc;
4009 }
4010
4011 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4012 {
4013         struct bnxt_hwrm_wait_token *token;
4014
4015         dma_pool_destroy(bp->hwrm_dma_pool);
4016         bp->hwrm_dma_pool = NULL;
4017
4018         rcu_read_lock();
4019         hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4020                 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4021         rcu_read_unlock();
4022 }
4023
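/* Create the DMA pool used for HWRM request/response buffers and the
 * list that tracks requests still waiting for a firmware response.
 */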
4024 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4025 {
4026         bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4027                                             BNXT_HWRM_DMA_SIZE,
4028                                             BNXT_HWRM_DMA_ALIGN, 0);
4029         if (!bp->hwrm_dma_pool)
4030                 return -ENOMEM;
4031
4032         INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4033
4034         return 0;
4035 }
4036
4037 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4038 {
4039         kfree(stats->hw_masks);
4040         stats->hw_masks = NULL;
4041         kfree(stats->sw_stats);
4042         stats->sw_stats = NULL;
4043         if (stats->hw_stats) {
4044                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4045                                   stats->hw_stats_map);
4046                 stats->hw_stats = NULL;
4047         }
4048 }
4049
4050 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4051                                 bool alloc_masks)
4052 {
4053         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4054                                              &stats->hw_stats_map, GFP_KERNEL);
4055         if (!stats->hw_stats)
4056                 return -ENOMEM;
4057
4058         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4059         if (!stats->sw_stats)
4060                 goto stats_mem_err;
4061
4062         if (alloc_masks) {
4063                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4064                 if (!stats->hw_masks)
4065                         goto stats_mem_err;
4066         }
4067         return 0;
4068
4069 stats_mem_err:
4070         bnxt_free_stats_mem(bp, stats);
4071         return -ENOMEM;
4072 }
4073
4074 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4075 {
4076         int i;
4077
4078         for (i = 0; i < count; i++)
4079                 mask_arr[i] = mask;
4080 }
4081
4082 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4083 {
4084         int i;
4085
4086         for (i = 0; i < count; i++)
4087                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4088 }
4089
4090 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4091                                     struct bnxt_stats_mem *stats)
4092 {
4093         struct hwrm_func_qstats_ext_output *resp;
4094         struct hwrm_func_qstats_ext_input *req;
4095         __le64 *hw_masks;
4096         int rc;
4097
4098         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4099             !(bp->flags & BNXT_FLAG_CHIP_P5))
4100                 return -EOPNOTSUPP;
4101
4102         rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4103         if (rc)
4104                 return rc;
4105
4106         req->fid = cpu_to_le16(0xffff);
4107         req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4108
4109         resp = hwrm_req_hold(bp, req);
4110         rc = hwrm_req_send(bp, req);
4111         if (!rc) {
4112                 hw_masks = &resp->rx_ucast_pkts;
4113                 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4114         }
4115         hwrm_req_drop(bp, req);
4116         return rc;
4117 }
4118
4119 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4120 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4121
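/* Seed the counter-width masks.  The masks are queried from the firmware
 * where supported; otherwise they fall back to fixed widths (48 bits for
 * per-ring counters on P5 chips, 64 bits otherwise, and 40 bits for the
 * port counters).
 */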
4122 static void bnxt_init_stats(struct bnxt *bp)
4123 {
4124         struct bnxt_napi *bnapi = bp->bnapi[0];
4125         struct bnxt_cp_ring_info *cpr;
4126         struct bnxt_stats_mem *stats;
4127         __le64 *rx_stats, *tx_stats;
4128         int rc, rx_count, tx_count;
4129         u64 *rx_masks, *tx_masks;
4130         u64 mask;
4131         u8 flags;
4132
4133         cpr = &bnapi->cp_ring;
4134         stats = &cpr->stats;
4135         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4136         if (rc) {
4137                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4138                         mask = (1ULL << 48) - 1;
4139                 else
4140                         mask = -1ULL;
4141                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4142         }
4143         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4144                 stats = &bp->port_stats;
4145                 rx_stats = stats->hw_stats;
4146                 rx_masks = stats->hw_masks;
4147                 rx_count = sizeof(struct rx_port_stats) / 8;
4148                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4149                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4150                 tx_count = sizeof(struct tx_port_stats) / 8;
4151
4152                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4153                 rc = bnxt_hwrm_port_qstats(bp, flags);
4154                 if (rc) {
4155                         mask = (1ULL << 40) - 1;
4156
4157                         bnxt_fill_masks(rx_masks, mask, rx_count);
4158                         bnxt_fill_masks(tx_masks, mask, tx_count);
4159                 } else {
4160                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4161                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4162                         bnxt_hwrm_port_qstats(bp, 0);
4163                 }
4164         }
4165         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4166                 stats = &bp->rx_port_stats_ext;
4167                 rx_stats = stats->hw_stats;
4168                 rx_masks = stats->hw_masks;
4169                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4170                 stats = &bp->tx_port_stats_ext;
4171                 tx_stats = stats->hw_stats;
4172                 tx_masks = stats->hw_masks;
4173                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4174
4175                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4176                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4177                 if (rc) {
4178                         mask = (1ULL << 40) - 1;
4179
4180                         bnxt_fill_masks(rx_masks, mask, rx_count);
4181                         if (tx_stats)
4182                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4183                 } else {
4184                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4185                         if (tx_stats)
4186                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4187                                                    tx_count);
4188                         bnxt_hwrm_port_qstats_ext(bp, 0);
4189                 }
4190         }
4191 }
4192
4193 static void bnxt_free_port_stats(struct bnxt *bp)
4194 {
4195         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4196         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4197
4198         bnxt_free_stats_mem(bp, &bp->port_stats);
4199         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4200         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4201 }
4202
4203 static void bnxt_free_ring_stats(struct bnxt *bp)
4204 {
4205         int i;
4206
4207         if (!bp->bnapi)
4208                 return;
4209
4210         for (i = 0; i < bp->cp_nr_rings; i++) {
4211                 struct bnxt_napi *bnapi = bp->bnapi[i];
4212                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4213
4214                 bnxt_free_stats_mem(bp, &cpr->stats);
4215         }
4216 }
4217
4218 static int bnxt_alloc_stats(struct bnxt *bp)
4219 {
4220         u32 size, i;
4221         int rc;
4222
4223         size = bp->hw_ring_stats_size;
4224
4225         for (i = 0; i < bp->cp_nr_rings; i++) {
4226                 struct bnxt_napi *bnapi = bp->bnapi[i];
4227                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4228
4229                 cpr->stats.len = size;
4230                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4231                 if (rc)
4232                         return rc;
4233
4234                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4235         }
4236
4237         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4238                 return 0;
4239
4240         if (bp->port_stats.hw_stats)
4241                 goto alloc_ext_stats;
4242
4243         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4244         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4245         if (rc)
4246                 return rc;
4247
4248         bp->flags |= BNXT_FLAG_PORT_STATS;
4249
4250 alloc_ext_stats:
4251         /* Display extended statistics only if the FW supports them */
4252         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4253                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4254                         return 0;
4255
4256         if (bp->rx_port_stats_ext.hw_stats)
4257                 goto alloc_tx_ext_stats;
4258
4259         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4260         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4261         /* Extended stats are optional */
4262         if (rc)
4263                 return 0;
4264
4265 alloc_tx_ext_stats:
4266         if (bp->tx_port_stats_ext.hw_stats)
4267                 return 0;
4268
4269         if (bp->hwrm_spec_code >= 0x10902 ||
4270             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4271                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4272                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4273                 /* Extended stats are optional */
4274                 if (rc)
4275                         return 0;
4276         }
4277         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4278         return 0;
4279 }
4280
4281 static void bnxt_clear_ring_indices(struct bnxt *bp)
4282 {
4283         int i;
4284
4285         if (!bp->bnapi)
4286                 return;
4287
4288         for (i = 0; i < bp->cp_nr_rings; i++) {
4289                 struct bnxt_napi *bnapi = bp->bnapi[i];
4290                 struct bnxt_cp_ring_info *cpr;
4291                 struct bnxt_rx_ring_info *rxr;
4292                 struct bnxt_tx_ring_info *txr;
4293
4294                 if (!bnapi)
4295                         continue;
4296
4297                 cpr = &bnapi->cp_ring;
4298                 cpr->cp_raw_cons = 0;
4299
4300                 txr = bnapi->tx_ring;
4301                 if (txr) {
4302                         txr->tx_prod = 0;
4303                         txr->tx_cons = 0;
4304                 }
4305
4306                 rxr = bnapi->rx_ring;
4307                 if (rxr) {
4308                         rxr->rx_prod = 0;
4309                         rxr->rx_agg_prod = 0;
4310                         rxr->rx_sw_agg_prod = 0;
4311                         rxr->rx_next_cons = 0;
4312                 }
4313         }
4314 }
4315
4316 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4317 {
4318 #ifdef CONFIG_RFS_ACCEL
4319         int i;
4320
4321         /* We are under rtnl_lock and all our NAPIs have been disabled,
4322          * so it is safe to delete the hash table.
4323          */
4324         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4325                 struct hlist_head *head;
4326                 struct hlist_node *tmp;
4327                 struct bnxt_ntuple_filter *fltr;
4328
4329                 head = &bp->ntp_fltr_hash_tbl[i];
4330                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4331                         hlist_del(&fltr->hash);
4332                         kfree(fltr);
4333                 }
4334         }
4335         if (irq_reinit) {
4336                 kfree(bp->ntp_fltr_bmap);
4337                 bp->ntp_fltr_bmap = NULL;
4338         }
4339         bp->ntp_fltr_count = 0;
4340 #endif
4341 }
4342
4343 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4344 {
4345 #ifdef CONFIG_RFS_ACCEL
4346         int i, rc = 0;
4347
4348         if (!(bp->flags & BNXT_FLAG_RFS))
4349                 return 0;
4350
4351         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4352                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4353
4354         bp->ntp_fltr_count = 0;
4355         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4356                                     sizeof(long),
4357                                     GFP_KERNEL);
4358
4359         if (!bp->ntp_fltr_bmap)
4360                 rc = -ENOMEM;
4361
4362         return rc;
4363 #else
4364         return 0;
4365 #endif
4366 }
4367
4368 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4369 {
4370         bnxt_free_vnic_attributes(bp);
4371         bnxt_free_tx_rings(bp);
4372         bnxt_free_rx_rings(bp);
4373         bnxt_free_cp_rings(bp);
4374         bnxt_free_all_cp_arrays(bp);
4375         bnxt_free_ntp_fltrs(bp, irq_re_init);
4376         if (irq_re_init) {
4377                 bnxt_free_ring_stats(bp);
4378                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4379                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4380                         bnxt_free_port_stats(bp);
4381                 bnxt_free_ring_grps(bp);
4382                 bnxt_free_vnics(bp);
4383                 kfree(bp->tx_ring_map);
4384                 bp->tx_ring_map = NULL;
4385                 kfree(bp->tx_ring);
4386                 bp->tx_ring = NULL;
4387                 kfree(bp->rx_ring);
4388                 bp->rx_ring = NULL;
4389                 kfree(bp->bnapi);
4390                 bp->bnapi = NULL;
4391         } else {
4392                 bnxt_clear_ring_indices(bp);
4393         }
4394 }
4395
4396 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4397 {
4398         int i, j, rc, size, arr_size;
4399         void *bnapi;
4400
4401         if (irq_re_init) {
4402                 /* Allocate the bnapi pointer array and a single memory
4403                  * block for all queues
4404                  */
4405                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4406                                 bp->cp_nr_rings);
4407                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4408                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4409                 if (!bnapi)
4410                         return -ENOMEM;
4411
4412                 bp->bnapi = bnapi;
4413                 bnapi += arr_size;
4414                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4415                         bp->bnapi[i] = bnapi;
4416                         bp->bnapi[i]->index = i;
4417                         bp->bnapi[i]->bp = bp;
4418                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4419                                 struct bnxt_cp_ring_info *cpr =
4420                                         &bp->bnapi[i]->cp_ring;
4421
4422                                 cpr->cp_ring_struct.ring_mem.flags =
4423                                         BNXT_RMEM_RING_PTE_FLAG;
4424                         }
4425                 }
4426
4427                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4428                                       sizeof(struct bnxt_rx_ring_info),
4429                                       GFP_KERNEL);
4430                 if (!bp->rx_ring)
4431                         return -ENOMEM;
4432
4433                 for (i = 0; i < bp->rx_nr_rings; i++) {
4434                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4435
4436                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4437                                 rxr->rx_ring_struct.ring_mem.flags =
4438                                         BNXT_RMEM_RING_PTE_FLAG;
4439                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4440                                         BNXT_RMEM_RING_PTE_FLAG;
4441                         }
4442                         rxr->bnapi = bp->bnapi[i];
4443                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4444                 }
4445
4446                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4447                                       sizeof(struct bnxt_tx_ring_info),
4448                                       GFP_KERNEL);
4449                 if (!bp->tx_ring)
4450                         return -ENOMEM;
4451
4452                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4453                                           GFP_KERNEL);
4454
4455                 if (!bp->tx_ring_map)
4456                         return -ENOMEM;
4457
4458                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4459                         j = 0;
4460                 else
4461                         j = bp->rx_nr_rings;
4462
4463                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4464                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4465
4466                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4467                                 txr->tx_ring_struct.ring_mem.flags =
4468                                         BNXT_RMEM_RING_PTE_FLAG;
4469                         txr->bnapi = bp->bnapi[j];
4470                         bp->bnapi[j]->tx_ring = txr;
4471                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4472                         if (i >= bp->tx_nr_rings_xdp) {
4473                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4474                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4475                         } else {
4476                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4477                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4478                         }
4479                 }
4480
4481                 rc = bnxt_alloc_stats(bp);
4482                 if (rc)
4483                         goto alloc_mem_err;
4484                 bnxt_init_stats(bp);
4485
4486                 rc = bnxt_alloc_ntp_fltrs(bp);
4487                 if (rc)
4488                         goto alloc_mem_err;
4489
4490                 rc = bnxt_alloc_vnics(bp);
4491                 if (rc)
4492                         goto alloc_mem_err;
4493         }
4494
4495         rc = bnxt_alloc_all_cp_arrays(bp);
4496         if (rc)
4497                 goto alloc_mem_err;
4498
4499         bnxt_init_ring_struct(bp);
4500
4501         rc = bnxt_alloc_rx_rings(bp);
4502         if (rc)
4503                 goto alloc_mem_err;
4504
4505         rc = bnxt_alloc_tx_rings(bp);
4506         if (rc)
4507                 goto alloc_mem_err;
4508
4509         rc = bnxt_alloc_cp_rings(bp);
4510         if (rc)
4511                 goto alloc_mem_err;
4512
4513         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4514                                   BNXT_VNIC_UCAST_FLAG;
4515         rc = bnxt_alloc_vnic_attributes(bp);
4516         if (rc)
4517                 goto alloc_mem_err;
4518         return 0;
4519
4520 alloc_mem_err:
4521         bnxt_free_mem(bp, true);
4522         return rc;
4523 }
4524
4525 static void bnxt_disable_int(struct bnxt *bp)
4526 {
4527         int i;
4528
4529         if (!bp->bnapi)
4530                 return;
4531
4532         for (i = 0; i < bp->cp_nr_rings; i++) {
4533                 struct bnxt_napi *bnapi = bp->bnapi[i];
4534                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4535                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4536
4537                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4538                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4539         }
4540 }
4541
4542 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4543 {
4544         struct bnxt_napi *bnapi = bp->bnapi[n];
4545         struct bnxt_cp_ring_info *cpr;
4546
4547         cpr = &bnapi->cp_ring;
4548         return cpr->cp_ring_struct.map_idx;
4549 }
4550
4551 static void bnxt_disable_int_sync(struct bnxt *bp)
4552 {
4553         int i;
4554
4555         if (!bp->irq_tbl)
4556                 return;
4557
4558         atomic_inc(&bp->intr_sem);
4559
4560         bnxt_disable_int(bp);
4561         for (i = 0; i < bp->cp_nr_rings; i++) {
4562                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4563
4564                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4565         }
4566 }
4567
4568 static void bnxt_enable_int(struct bnxt *bp)
4569 {
4570         int i;
4571
4572         atomic_set(&bp->intr_sem, 0);
4573         for (i = 0; i < bp->cp_nr_rings; i++) {
4574                 struct bnxt_napi *bnapi = bp->bnapi[i];
4575                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4576
4577                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4578         }
4579 }
4580
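/* Register the driver with the firmware: advertise the driver version and
 * capabilities (hot reset, error recovery), set up forwarding of selected
 * VF commands to the PF, and subscribe to the async events the driver
 * wants to receive.
 */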
4581 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4582                             bool async_only)
4583 {
4584         DECLARE_BITMAP(async_events_bmap, 256);
4585         u32 *events = (u32 *)async_events_bmap;
4586         struct hwrm_func_drv_rgtr_output *resp;
4587         struct hwrm_func_drv_rgtr_input *req;
4588         u32 flags;
4589         int rc, i;
4590
4591         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4592         if (rc)
4593                 return rc;
4594
4595         req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4596                                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4597                                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4598
4599         req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4600         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4601         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4602                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4603         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4604                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4605                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4606         req->flags = cpu_to_le32(flags);
4607         req->ver_maj_8b = DRV_VER_MAJ;
4608         req->ver_min_8b = DRV_VER_MIN;
4609         req->ver_upd_8b = DRV_VER_UPD;
4610         req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4611         req->ver_min = cpu_to_le16(DRV_VER_MIN);
4612         req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4613
4614         if (BNXT_PF(bp)) {
4615                 u32 data[8];
4616                 int i;
4617
4618                 memset(data, 0, sizeof(data));
4619                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4620                         u16 cmd = bnxt_vf_req_snif[i];
4621                         unsigned int bit, idx;
4622
4623                         idx = cmd / 32;
4624                         bit = cmd % 32;
4625                         data[idx] |= 1 << bit;
4626                 }
4627
4628                 for (i = 0; i < 8; i++)
4629                         req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4630
4631                 req->enables |=
4632                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4633         }
4634
4635         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4636                 req->flags |= cpu_to_le32(
4637                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4638
4639         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4640         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4641                 u16 event_id = bnxt_async_events_arr[i];
4642
4643                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4644                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4645                         continue;
4646                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4647         }
4648         if (bmap && bmap_size) {
4649                 for (i = 0; i < bmap_size; i++) {
4650                         if (test_bit(i, bmap))
4651                                 __set_bit(i, async_events_bmap);
4652                 }
4653         }
4654         for (i = 0; i < 8; i++)
4655                 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4656
4657         if (async_only)
4658                 req->enables =
4659                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4660
4661         resp = hwrm_req_hold(bp, req);
4662         rc = hwrm_req_send(bp, req);
4663         if (!rc) {
4664                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4665                 if (resp->flags &
4666                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4667                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4668         }
4669         hwrm_req_drop(bp, req);
4670         return rc;
4671 }
4672
4673 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4674 {
4675         struct hwrm_func_drv_unrgtr_input *req;
4676         int rc;
4677
4678         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4679                 return 0;
4680
4681         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4682         if (rc)
4683                 return rc;
4684         return hwrm_req_send(bp, req);
4685 }
4686
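/* The next two helpers program (or remove) the UDP destination port that
 * the NIC uses to recognize VXLAN and GENEVE encapsulated traffic.
 */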
4687 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4688 {
4689         struct hwrm_tunnel_dst_port_free_input *req;
4690         int rc;
4691
4692         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4693             bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4694                 return 0;
4695         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4696             bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4697                 return 0;
4698
4699         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4700         if (rc)
4701                 return rc;
4702
4703         req->tunnel_type = tunnel_type;
4704
4705         switch (tunnel_type) {
4706         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4707                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4708                 bp->vxlan_port = 0;
4709                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4710                 break;
4711         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4712                 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4713                 bp->nge_port = 0;
4714                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4715                 break;
4716         default:
4717                 break;
4718         }
4719
4720         rc = hwrm_req_send(bp, req);
4721         if (rc)
4722                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4723                            rc);
4724         return rc;
4725 }
4726
4727 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4728                                            u8 tunnel_type)
4729 {
4730         struct hwrm_tunnel_dst_port_alloc_output *resp;
4731         struct hwrm_tunnel_dst_port_alloc_input *req;
4732         int rc;
4733
4734         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4735         if (rc)
4736                 return rc;
4737
4738         req->tunnel_type = tunnel_type;
4739         req->tunnel_dst_port_val = port;
4740
4741         resp = hwrm_req_hold(bp, req);
4742         rc = hwrm_req_send(bp, req);
4743         if (rc) {
4744                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4745                            rc);
4746                 goto err_out;
4747         }
4748
4749         switch (tunnel_type) {
4750         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4751                 bp->vxlan_port = port;
4752                 bp->vxlan_fw_dst_port_id =
4753                         le16_to_cpu(resp->tunnel_dst_port_id);
4754                 break;
4755         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4756                 bp->nge_port = port;
4757                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4758                 break;
4759         default:
4760                 break;
4761         }
4762
4763 err_out:
4764         hwrm_req_drop(bp, req);
4765         return rc;
4766 }
4767
4768 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4769 {
4770         struct hwrm_cfa_l2_set_rx_mask_input *req;
4771         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4772         int rc;
4773
4774         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4775         if (rc)
4776                 return rc;
4777
4778         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4779         if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4780                 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4781                 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4782         }
4783         req->mask = cpu_to_le32(vnic->rx_mask);
4784         return hwrm_req_send_silent(bp, req);
4785 }
4786
4787 #ifdef CONFIG_RFS_ACCEL
4788 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4789                                             struct bnxt_ntuple_filter *fltr)
4790 {
4791         struct hwrm_cfa_ntuple_filter_free_input *req;
4792         int rc;
4793
4794         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4795         if (rc)
4796                 return rc;
4797
4798         req->ntuple_filter_id = fltr->filter_id;
4799         return hwrm_req_send(bp, req);
4800 }
4801
4802 #define BNXT_NTP_FLTR_FLAGS                                     \
4803         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4804          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4805          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4806          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4807          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4808          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4809          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4810          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4811          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4812          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4813          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4814          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4815          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4816          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4817
4818 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4819                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4820
4821 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4822                                              struct bnxt_ntuple_filter *fltr)
4823 {
4824         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4825         struct hwrm_cfa_ntuple_filter_alloc_input *req;
4826         struct flow_keys *keys = &fltr->fkeys;
4827         struct bnxt_vnic_info *vnic;
4828         u32 flags = 0;
4829         int rc;
4830
4831         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4832         if (rc)
4833                 return rc;
4834
4835         req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4836
4837         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4838                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4839                 req->dst_id = cpu_to_le16(fltr->rxq);
4840         } else {
4841                 vnic = &bp->vnic_info[fltr->rxq + 1];
4842                 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4843         }
4844         req->flags = cpu_to_le32(flags);
4845         req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4846
4847         req->ethertype = htons(ETH_P_IP);
4848         memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4849         req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4850         req->ip_protocol = keys->basic.ip_proto;
4851
4852         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4853                 int i;
4854
4855                 req->ethertype = htons(ETH_P_IPV6);
4856                 req->ip_addr_type =
4857                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4858                 *(struct in6_addr *)&req->src_ipaddr[0] =
4859                         keys->addrs.v6addrs.src;
4860                 *(struct in6_addr *)&req->dst_ipaddr[0] =
4861                         keys->addrs.v6addrs.dst;
4862                 for (i = 0; i < 4; i++) {
4863                         req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4864                         req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4865                 }
4866         } else {
4867                 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4868                 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4869                 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4870                 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4871         }
4872         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4873                 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4874                 req->tunnel_type =
4875                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4876         }
4877
4878         req->src_port = keys->ports.src;
4879         req->src_port_mask = cpu_to_be16(0xffff);
4880         req->dst_port = keys->ports.dst;
4881         req->dst_port_mask = cpu_to_be16(0xffff);
4882
4883         resp = hwrm_req_hold(bp, req);
4884         rc = hwrm_req_send(bp, req);
4885         if (!rc)
4886                 fltr->filter_id = resp->ntuple_filter_id;
4887         hwrm_req_drop(bp, req);
4888         return rc;
4889 }
4890 #endif
4891
4892 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4893                                      const u8 *mac_addr)
4894 {
4895         struct hwrm_cfa_l2_filter_alloc_output *resp;
4896         struct hwrm_cfa_l2_filter_alloc_input *req;
4897         int rc;
4898
4899         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4900         if (rc)
4901                 return rc;
4902
4903         req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4904         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4905                 req->flags |=
4906                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4907         req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4908         req->enables =
4909                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4910                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4911                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4912         memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4913         req->l2_addr_mask[0] = 0xff;
4914         req->l2_addr_mask[1] = 0xff;
4915         req->l2_addr_mask[2] = 0xff;
4916         req->l2_addr_mask[3] = 0xff;
4917         req->l2_addr_mask[4] = 0xff;
4918         req->l2_addr_mask[5] = 0xff;
4919
4920         resp = hwrm_req_hold(bp, req);
4921         rc = hwrm_req_send(bp, req);
4922         if (!rc)
4923                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4924                                                         resp->l2_filter_id;
4925         hwrm_req_drop(bp, req);
4926         return rc;
4927 }
4928
4929 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4930 {
4931         struct hwrm_cfa_l2_filter_free_input *req;
4932         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4933         int rc;
4934
4935         /* Any associated ntuple filters will also be cleared by firmware. */
4936         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4937         if (rc)
4938                 return rc;
4939         hwrm_req_hold(bp, req);
4940         for (i = 0; i < num_of_vnics; i++) {
4941                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4942
4943                 for (j = 0; j < vnic->uc_filter_count; j++) {
4944                         req->l2_filter_id = vnic->fw_l2_filter_id[j];
4945
4946                         rc = hwrm_req_send(bp, req);
4947                 }
4948                 vnic->uc_filter_count = 0;
4949         }
4950         hwrm_req_drop(bp, req);
4951         return rc;
4952 }
4953
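/* Configure TPA (hardware receive aggregation) for a VNIC.  When tpa_flags
 * is zero the request simply disables TPA; otherwise the maximum number of
 * aggregated segments is derived from the device MTU and the rx page size
 * as computed below.
 */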
4954 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4955 {
4956         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4957         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4958         struct hwrm_vnic_tpa_cfg_input *req;
4959         int rc;
4960
4961         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4962                 return 0;
4963
4964         rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4965         if (rc)
4966                 return rc;
4967
4968         if (tpa_flags) {
4969                 u16 mss = bp->dev->mtu - 40;
4970                 u32 nsegs, n, segs = 0, flags;
4971
4972                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4973                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4974                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4975                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4976                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4977                 if (tpa_flags & BNXT_FLAG_GRO)
4978                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4979
4980                 req->flags = cpu_to_le32(flags);
4981
4982                 req->enables =
4983                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4984                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4985                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4986
4987                 /* The number of aggregation segments is in log2 units, and
4988                  * the first packet is not counted as part of these units.
4989                  */
4990                 if (mss <= BNXT_RX_PAGE_SIZE) {
4991                         n = BNXT_RX_PAGE_SIZE / mss;
4992                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4993                 } else {
4994                         n = mss / BNXT_RX_PAGE_SIZE;
4995                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4996                                 n++;
4997                         nsegs = (MAX_SKB_FRAGS - n) / n;
4998                 }
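                /* For example (assuming a 4K BNXT_RX_PAGE_SIZE and the usual
                 * MAX_SKB_FRAGS of 17): MTU 1500 gives mss = 1460, so
                 * n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, and on
                 * non-P5 chips segs = ilog2(32) = 5.
                 */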
4999
5000                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5001                         segs = MAX_TPA_SEGS_P5;
5002                         max_aggs = bp->max_tpa;
5003                 } else {
5004                         segs = ilog2(nsegs);
5005                 }
5006                 req->max_agg_segs = cpu_to_le16(segs);
5007                 req->max_aggs = cpu_to_le16(max_aggs);
5008
5009                 req->min_agg_len = cpu_to_le32(512);
5010         }
5011         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5012
5013         return hwrm_req_send(bp, req);
5014 }
5015
5016 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5017 {
5018         struct bnxt_ring_grp_info *grp_info;
5019
5020         grp_info = &bp->grp_info[ring->grp_idx];
5021         return grp_info->cp_fw_ring_id;
5022 }
5023
5024 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5025 {
5026         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5027                 struct bnxt_napi *bnapi = rxr->bnapi;
5028                 struct bnxt_cp_ring_info *cpr;
5029
5030                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5031                 return cpr->cp_ring_struct.fw_ring_id;
5032         } else {
5033                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5034         }
5035 }
5036
5037 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5038 {
5039         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5040                 struct bnxt_napi *bnapi = txr->bnapi;
5041                 struct bnxt_cp_ring_info *cpr;
5042
5043                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5044                 return cpr->cp_ring_struct.fw_ring_id;
5045         } else {
5046                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5047         }
5048 }
5049
5050 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5051 {
5052         int entries;
5053
5054         if (bp->flags & BNXT_FLAG_CHIP_P5)
5055                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5056         else
5057                 entries = HW_HASH_INDEX_SIZE;
5058
5059         bp->rss_indir_tbl_entries = entries;
5060         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5061                                           GFP_KERNEL);
5062         if (!bp->rss_indir_tbl)
5063                 return -ENOMEM;
5064         return 0;
5065 }
5066
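/* Populate the default RSS indirection table.  ethtool_rxfh_indir_default()
 * spreads the entries round-robin across the RX rings (index % ring count),
 * e.g. with 4 rings the table reads 0, 1, 2, 3, 0, 1, ...
 */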
5067 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5068 {
5069         u16 max_rings, max_entries, pad, i;
5070
5071         if (!bp->rx_nr_rings)
5072                 return;
5073
5074         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5075                 max_rings = bp->rx_nr_rings - 1;
5076         else
5077                 max_rings = bp->rx_nr_rings;
5078
5079         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5080
5081         for (i = 0; i < max_entries; i++)
5082                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5083
5084         pad = bp->rss_indir_tbl_entries - max_entries;
5085         if (pad)
5086                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5087 }
5088
5089 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5090 {
5091         u16 i, tbl_size, max_ring = 0;
5092
5093         if (!bp->rss_indir_tbl)
5094                 return 0;
5095
5096         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5097         for (i = 0; i < tbl_size; i++)
5098                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5099         return max_ring;
5100 }
5101
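/* Number of RSS contexts needed: P5 chips need one context per
 * BNXT_RSS_TABLE_ENTRIES_P5 RX rings, the Nitro A0 needs two, and all
 * other chips need a single context.
 */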
5102 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5103 {
5104         if (bp->flags & BNXT_FLAG_CHIP_P5)
5105                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5106         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5107                 return 2;
5108         return 1;
5109 }
5110
5111 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5112 {
5113         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5114         u16 i, j;
5115
5116         /* Fill the RSS indirection table with ring group ids */
5117         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5118                 if (!no_rss)
5119                         j = bp->rss_indir_tbl[i];
5120                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5121         }
5122 }
5123
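/* P5 variant: each indirection table slot is written as a pair of ring IDs,
 * the RX ring's firmware ID followed by its completion ring's firmware ID.
 */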
5124 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5125                                       struct bnxt_vnic_info *vnic)
5126 {
5127         __le16 *ring_tbl = vnic->rss_table;
5128         struct bnxt_rx_ring_info *rxr;
5129         u16 tbl_size, i;
5130
5131         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5132
5133         for (i = 0; i < tbl_size; i++) {
5134                 u16 ring_id, j;
5135
5136                 j = bp->rss_indir_tbl[i];
5137                 rxr = &bp->rx_ring[j];
5138
5139                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5140                 *ring_tbl++ = cpu_to_le16(ring_id);
5141                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5142                 *ring_tbl++ = cpu_to_le16(ring_id);
5143         }
5144 }
5145
5146 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5147 {
5148         if (bp->flags & BNXT_FLAG_CHIP_P5)
5149                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5150         else
5151                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5152 }
5153
5154 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5155 {
5156         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5157         struct hwrm_vnic_rss_cfg_input *req;
5158         int rc;
5159
5160         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5161             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5162                 return 0;
5163
5164         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5165         if (rc)
5166                 return rc;
5167
5168         if (set_rss) {
5169                 bnxt_fill_hw_rss_tbl(bp, vnic);
5170                 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5171                 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5172                 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5173                 req->hash_key_tbl_addr =
5174                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5175         }
5176         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5177         return hwrm_req_send(bp, req);
5178 }
5179
5180 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5181 {
5182         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5183         struct hwrm_vnic_rss_cfg_input *req;
5184         dma_addr_t ring_tbl_map;
5185         u32 i, nr_ctxs;
5186         int rc;
5187
5188         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5189         if (rc)
5190                 return rc;
5191
5192         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5193         if (!set_rss)
5194                 return hwrm_req_send(bp, req);
5195
5196         bnxt_fill_hw_rss_tbl(bp, vnic);
5197         req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5198         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5199         req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5200         ring_tbl_map = vnic->rss_table_dma_addr;
5201         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5202
5203         hwrm_req_hold(bp, req);
5204         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5205                 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5206                 req->ring_table_pair_index = i;
5207                 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5208                 rc = hwrm_req_send(bp, req);
5209                 if (rc)
5210                         goto exit;
5211         }
5212
5213 exit:
5214         hwrm_req_drop(bp, req);
5215         return rc;
5216 }
5217
5218 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5219 {
5220         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5221         struct hwrm_vnic_plcmodes_cfg_input *req;
5222         int rc;
5223
5224         rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5225         if (rc)
5226                 return rc;
5227
5228         req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5229                                  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5230                                  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5231         req->enables =
5232                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5233                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5234         /* thresholds not implemented in firmware yet */
5235         req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5236         req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5237         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5238         return hwrm_req_send(bp, req);
5239 }
5240
5241 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5242                                         u16 ctx_idx)
5243 {
5244         struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5245
5246         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5247                 return;
5248
5249         req->rss_cos_lb_ctx_id =
5250                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5251
5252         hwrm_req_send(bp, req);
5253         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5254 }
5255
5256 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5257 {
5258         int i, j;
5259
5260         for (i = 0; i < bp->nr_vnics; i++) {
5261                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5262
5263                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5264                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5265                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5266                 }
5267         }
5268         bp->rsscos_nr_ctxs = 0;
5269 }
5270
5271 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5272 {
5273         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5274         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5275         int rc;
5276
5277         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5278         if (rc)
5279                 return rc;
5280
5281         resp = hwrm_req_hold(bp, req);
5282         rc = hwrm_req_send(bp, req);
5283         if (!rc)
5284                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5285                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5286         hwrm_req_drop(bp, req);
5287
5288         return rc;
5289 }
5290
5291 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5292 {
5293         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5294                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5295         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5296 }
5297
5298 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5299 {
5300         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5301         struct hwrm_vnic_cfg_input *req;
5302         unsigned int ring = 0, grp_idx;
5303         u16 def_vlan = 0;
5304         int rc;
5305
5306         rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5307         if (rc)
5308                 return rc;
5309
5310         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5311                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5312
5313                 req->default_rx_ring_id =
5314                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5315                 req->default_cmpl_ring_id =
5316                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5317                 req->enables =
5318                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5319                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5320                 goto vnic_mru;
5321         }
5322         req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5323         /* Only RSS is supported for now; COS & LB are TBD */
5324         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5325                 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5326                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5327                                            VNIC_CFG_REQ_ENABLES_MRU);
5328         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5329                 req->rss_rule =
5330                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5331                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5332                                            VNIC_CFG_REQ_ENABLES_MRU);
5333                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5334         } else {
5335                 req->rss_rule = cpu_to_le16(0xffff);
5336         }
5337
5338         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5339             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5340                 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5341                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5342         } else {
5343                 req->cos_rule = cpu_to_le16(0xffff);
5344         }
5345
5346         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5347                 ring = 0;
5348         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5349                 ring = vnic_id - 1;
5350         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5351                 ring = bp->rx_nr_rings - 1;
5352
5353         grp_idx = bp->rx_ring[ring].bnapi->index;
5354         req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5355         req->lb_rule = cpu_to_le16(0xffff);
5356 vnic_mru:
5357         req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5358
5359         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5360 #ifdef CONFIG_BNXT_SRIOV
5361         if (BNXT_VF(bp))
5362                 def_vlan = bp->vf.vlan;
5363 #endif
5364         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5365                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5366         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5367                 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5368
5369         return hwrm_req_send(bp, req);
5370 }
5371
5372 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5373 {
5374         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5375                 struct hwrm_vnic_free_input *req;
5376
5377                 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5378                         return;
5379
5380                 req->vnic_id =
5381                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5382
5383                 hwrm_req_send(bp, req);
5384                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5385         }
5386 }
5387
5388 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5389 {
5390         u16 i;
5391
5392         for (i = 0; i < bp->nr_vnics; i++)
5393                 bnxt_hwrm_vnic_free_one(bp, i);
5394 }
5395
5396 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5397                                 unsigned int start_rx_ring_idx,
5398                                 unsigned int nr_rings)
5399 {
5400         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5401         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5402         struct hwrm_vnic_alloc_output *resp;
5403         struct hwrm_vnic_alloc_input *req;
5404         int rc;
5405
5406         rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5407         if (rc)
5408                 return rc;
5409
5410         if (bp->flags & BNXT_FLAG_CHIP_P5)
5411                 goto vnic_no_ring_grps;
5412
5413         /* map ring groups to this vnic */
5414         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5415                 grp_idx = bp->rx_ring[i].bnapi->index;
5416                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5417                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5418                                    j, nr_rings);
5419                         break;
5420                 }
5421                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5422         }
5423
5424 vnic_no_ring_grps:
5425         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5426                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5427         if (vnic_id == 0)
5428                 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5429
5430         resp = hwrm_req_hold(bp, req);
5431         rc = hwrm_req_send(bp, req);
5432         if (!rc)
5433                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5434         hwrm_req_drop(bp, req);
5435         return rc;
5436 }
5437
5438 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5439 {
5440         struct hwrm_vnic_qcaps_output *resp;
5441         struct hwrm_vnic_qcaps_input *req;
5442         int rc;
5443
5444         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5445         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5446         if (bp->hwrm_spec_code < 0x10600)
5447                 return 0;
5448
5449         rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5450         if (rc)
5451                 return rc;
5452
5453         resp = hwrm_req_hold(bp, req);
5454         rc = hwrm_req_send(bp, req);
5455         if (!rc) {
5456                 u32 flags = le32_to_cpu(resp->flags);
5457
5458                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5459                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5460                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5461                 if (flags &
5462                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5463                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5464
5465                 /* Older P5 fw before EXT_HW_STATS support did not set
5466                  * VLAN_STRIP_CAP properly.
5467                  */
5468                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5469                     (BNXT_CHIP_P5_THOR(bp) &&
5470                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5471                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5472                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5473                 if (bp->max_tpa_v2) {
5474                         if (BNXT_CHIP_P5_THOR(bp))
5475                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5476                         else
5477                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5478                 }
5479         }
5480         hwrm_req_drop(bp, req);
5481         return rc;
5482 }
5483
5484 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5485 {
5486         struct hwrm_ring_grp_alloc_output *resp;
5487         struct hwrm_ring_grp_alloc_input *req;
5488         int rc;
5489         u16 i;
5490
5491         if (bp->flags & BNXT_FLAG_CHIP_P5)
5492                 return 0;
5493
5494         rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5495         if (rc)
5496                 return rc;
5497
5498         resp = hwrm_req_hold(bp, req);
5499         for (i = 0; i < bp->rx_nr_rings; i++) {
5500                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5501
5502                 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5503                 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5504                 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5505                 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5506
5507                 rc = hwrm_req_send(bp, req);
5508
5509                 if (rc)
5510                         break;
5511
5512                 bp->grp_info[grp_idx].fw_grp_id =
5513                         le32_to_cpu(resp->ring_group_id);
5514         }
5515         hwrm_req_drop(bp, req);
5516         return rc;
5517 }
5518
5519 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5520 {
5521         struct hwrm_ring_grp_free_input *req;
5522         u16 i;
5523
5524         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5525                 return;
5526
5527         if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5528                 return;
5529
5530         hwrm_req_hold(bp, req);
5531         for (i = 0; i < bp->cp_nr_rings; i++) {
5532                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5533                         continue;
5534                 req->ring_group_id =
5535                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5536
5537                 hwrm_req_send(bp, req);
5538                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5539         }
5540         hwrm_req_drop(bp, req);
5541 }
5542
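/* Send one HWRM_RING_ALLOC request for a TX, RX, AGG, completion or NQ
 * ring and store the firmware-assigned ring ID in @ring on success.
 */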
5543 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5544                                     struct bnxt_ring_struct *ring,
5545                                     u32 ring_type, u32 map_index)
5546 {
5547         struct hwrm_ring_alloc_output *resp;
5548         struct hwrm_ring_alloc_input *req;
5549         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5550         struct bnxt_ring_grp_info *grp_info;
5551         int rc, err = 0;
5552         u16 ring_id;
5553
5554         rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5555         if (rc)
5556                 goto exit;
5557
5558         req->enables = 0;
5559         if (rmem->nr_pages > 1) {
5560                 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5561                 /* Page size is in log2 units */
5562                 req->page_size = BNXT_PAGE_SHIFT;
5563                 req->page_tbl_depth = 1;
5564         } else {
5565                 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5566         }
5567         req->fbo = 0;
5568         /* Association of ring index with doorbell index and MSIX number */
5569         req->logical_id = cpu_to_le16(map_index);
5570
5571         switch (ring_type) {
5572         case HWRM_RING_ALLOC_TX: {
5573                 struct bnxt_tx_ring_info *txr;
5574
5575                 txr = container_of(ring, struct bnxt_tx_ring_info,
5576                                    tx_ring_struct);
5577                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5578                 /* Association of transmit ring with completion ring */
5579                 grp_info = &bp->grp_info[ring->grp_idx];
5580                 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5581                 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5582                 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5583                 req->queue_id = cpu_to_le16(ring->queue_id);
5584                 break;
5585         }
5586         case HWRM_RING_ALLOC_RX:
5587                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5588                 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5589                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5590                         u16 flags = 0;
5591
5592                         /* Association of rx ring with stats context */
5593                         grp_info = &bp->grp_info[ring->grp_idx];
5594                         req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5595                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5596                         req->enables |= cpu_to_le32(
5597                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5598                         if (NET_IP_ALIGN == 2)
5599                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5600                         req->flags = cpu_to_le16(flags);
5601                 }
5602                 break;
5603         case HWRM_RING_ALLOC_AGG:
5604                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5605                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5606                         /* Association of agg ring with rx ring */
5607                         grp_info = &bp->grp_info[ring->grp_idx];
5608                         req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5609                         req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5610                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5611                         req->enables |= cpu_to_le32(
5612                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5613                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5614                 } else {
5615                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5616                 }
5617                 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5618                 break;
5619         case HWRM_RING_ALLOC_CMPL:
5620                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5621                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5622                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5623                         /* Association of cp ring with nq */
5624                         grp_info = &bp->grp_info[map_index];
5625                         req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5626                         req->cq_handle = cpu_to_le64(ring->handle);
5627                         req->enables |= cpu_to_le32(
5628                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5629                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5630                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5631                 }
5632                 break;
5633         case HWRM_RING_ALLOC_NQ:
5634                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5635                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5636                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5637                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5638                 break;
5639         default:
5640                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5641                            ring_type);
5642                 return -1;
5643         }
5644
5645         resp = hwrm_req_hold(bp, req);
5646         rc = hwrm_req_send(bp, req);
5647         err = le16_to_cpu(resp->error_code);
5648         ring_id = le16_to_cpu(resp->ring_id);
5649         hwrm_req_drop(bp, req);
5650
5651 exit:
5652         if (rc || err) {
5653                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5654                            ring_type, rc, err);
5655                 return -EIO;
5656         }
5657         ring->fw_ring_id = ring_id;
5658         return rc;
5659 }
5660
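/* Tell the firmware which completion ring to use for asynchronous event
 * notifications (FUNC_CFG on the PF, FUNC_VF_CFG on a VF).
 */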
5661 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5662 {
5663         int rc;
5664
5665         if (BNXT_PF(bp)) {
5666                 struct hwrm_func_cfg_input *req;
5667
5668                 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5669                 if (rc)
5670                         return rc;
5671
5672                 req->fid = cpu_to_le16(0xffff);
5673                 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5674                 req->async_event_cr = cpu_to_le16(idx);
5675                 return hwrm_req_send(bp, req);
5676         } else {
5677                 struct hwrm_func_vf_cfg_input *req;
5678
5679                 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5680                 if (rc)
5681                         return rc;
5682
5683                 req->enables =
5684                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5685                 req->async_event_cr = cpu_to_le16(idx);
5686                 return hwrm_req_send(bp, req);
5687         }
5688 }
5689
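/* Set up the doorbell address and key for a ring: 64-bit doorbells at a
 * fixed PF/VF offset on P5 chips, legacy 32-bit doorbells at
 * map_idx * 0x80 on older chips.
 */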
5690 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5691                         u32 map_idx, u32 xid)
5692 {
5693         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5694                 if (BNXT_PF(bp))
5695                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5696                 else
5697                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5698                 switch (ring_type) {
5699                 case HWRM_RING_ALLOC_TX:
5700                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5701                         break;
5702                 case HWRM_RING_ALLOC_RX:
5703                 case HWRM_RING_ALLOC_AGG:
5704                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5705                         break;
5706                 case HWRM_RING_ALLOC_CMPL:
5707                         db->db_key64 = DBR_PATH_L2;
5708                         break;
5709                 case HWRM_RING_ALLOC_NQ:
5710                         db->db_key64 = DBR_PATH_L2;
5711                         break;
5712                 }
5713                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5714         } else {
5715                 db->doorbell = bp->bar1 + map_idx * 0x80;
5716                 switch (ring_type) {
5717                 case HWRM_RING_ALLOC_TX:
5718                         db->db_key32 = DB_KEY_TX;
5719                         break;
5720                 case HWRM_RING_ALLOC_RX:
5721                 case HWRM_RING_ALLOC_AGG:
5722                         db->db_key32 = DB_KEY_RX;
5723                         break;
5724                 case HWRM_RING_ALLOC_CMPL:
5725                         db->db_key32 = DB_KEY_CP;
5726                         break;
5727                 }
5728         }
5729 }
5730
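/* Allocate all firmware rings: completion/NQ rings first (with the IRQ
 * briefly disabled around each allocation), then TX, RX and aggregation
 * rings, programming the corresponding doorbells along the way.
 */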
5731 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5732 {
5733         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5734         int i, rc = 0;
5735         u32 type;
5736
5737         if (bp->flags & BNXT_FLAG_CHIP_P5)
5738                 type = HWRM_RING_ALLOC_NQ;
5739         else
5740                 type = HWRM_RING_ALLOC_CMPL;
5741         for (i = 0; i < bp->cp_nr_rings; i++) {
5742                 struct bnxt_napi *bnapi = bp->bnapi[i];
5743                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5744                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5745                 u32 map_idx = ring->map_idx;
5746                 unsigned int vector;
5747
5748                 vector = bp->irq_tbl[map_idx].vector;
5749                 disable_irq_nosync(vector);
5750                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5751                 if (rc) {
5752                         enable_irq(vector);
5753                         goto err_out;
5754                 }
5755                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5756                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5757                 enable_irq(vector);
5758                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5759
5760                 if (!i) {
5761                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5762                         if (rc)
5763                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5764                 }
5765         }
5766
5767         type = HWRM_RING_ALLOC_TX;
5768         for (i = 0; i < bp->tx_nr_rings; i++) {
5769                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5770                 struct bnxt_ring_struct *ring;
5771                 u32 map_idx;
5772
5773                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5774                         struct bnxt_napi *bnapi = txr->bnapi;
5775                         struct bnxt_cp_ring_info *cpr, *cpr2;
5776                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5777
5778                         cpr = &bnapi->cp_ring;
5779                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5780                         ring = &cpr2->cp_ring_struct;
5781                         ring->handle = BNXT_TX_HDL;
5782                         map_idx = bnapi->index;
5783                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5784                         if (rc)
5785                                 goto err_out;
5786                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5787                                     ring->fw_ring_id);
5788                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5789                 }
5790                 ring = &txr->tx_ring_struct;
5791                 map_idx = i;
5792                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5793                 if (rc)
5794                         goto err_out;
5795                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5796         }
5797
5798         type = HWRM_RING_ALLOC_RX;
5799         for (i = 0; i < bp->rx_nr_rings; i++) {
5800                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5801                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5802                 struct bnxt_napi *bnapi = rxr->bnapi;
5803                 u32 map_idx = bnapi->index;
5804
5805                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5806                 if (rc)
5807                         goto err_out;
5808                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5809                 /* If we have agg rings, post agg buffers first. */
5810                 if (!agg_rings)
5811                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5812                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5813                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5814                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5815                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5816                         struct bnxt_cp_ring_info *cpr2;
5817
5818                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5819                         ring = &cpr2->cp_ring_struct;
5820                         ring->handle = BNXT_RX_HDL;
5821                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5822                         if (rc)
5823                                 goto err_out;
5824                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5825                                     ring->fw_ring_id);
5826                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5827                 }
5828         }
5829
5830         if (agg_rings) {
5831                 type = HWRM_RING_ALLOC_AGG;
5832                 for (i = 0; i < bp->rx_nr_rings; i++) {
5833                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5834                         struct bnxt_ring_struct *ring =
5835                                                 &rxr->rx_agg_ring_struct;
5836                         u32 grp_idx = ring->grp_idx;
5837                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5838
5839                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5840                         if (rc)
5841                                 goto err_out;
5842
5843                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5844                                     ring->fw_ring_id);
5845                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5846                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5847                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5848                 }
5849         }
5850 err_out:
5851         return rc;
5852 }
5853
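/* Send one HWRM_RING_FREE request for @ring.  When closing the data path,
 * the associated completion ring ID is passed so the firmware can flush
 * pending completions; otherwise INVALID_HW_RING_ID is used.
 */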
5854 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5855                                    struct bnxt_ring_struct *ring,
5856                                    u32 ring_type, int cmpl_ring_id)
5857 {
5858         struct hwrm_ring_free_output *resp;
5859         struct hwrm_ring_free_input *req;
5860         u16 error_code = 0;
5861         int rc;
5862
5863         if (BNXT_NO_FW_ACCESS(bp))
5864                 return 0;
5865
5866         rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5867         if (rc)
5868                 goto exit;
5869
5870         req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5871         req->ring_type = ring_type;
5872         req->ring_id = cpu_to_le16(ring->fw_ring_id);
5873
5874         resp = hwrm_req_hold(bp, req);
5875         rc = hwrm_req_send(bp, req);
5876         error_code = le16_to_cpu(resp->error_code);
5877         hwrm_req_drop(bp, req);
5878 exit:
5879         if (rc || error_code) {
5880                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5881                            ring_type, rc, error_code);
5882                 return -EIO;
5883         }
5884         return 0;
5885 }
5886
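/* Free all firmware rings: TX, RX and aggregation rings first, then, after
 * disabling interrupts, the completion/NQ rings.
 */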
5887 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5888 {
5889         u32 type;
5890         int i;
5891
5892         if (!bp->bnapi)
5893                 return;
5894
5895         for (i = 0; i < bp->tx_nr_rings; i++) {
5896                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5897                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5898
5899                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5900                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5901
5902                         hwrm_ring_free_send_msg(bp, ring,
5903                                                 RING_FREE_REQ_RING_TYPE_TX,
5904                                                 close_path ? cmpl_ring_id :
5905                                                 INVALID_HW_RING_ID);
5906                         ring->fw_ring_id = INVALID_HW_RING_ID;
5907                 }
5908         }
5909
5910         for (i = 0; i < bp->rx_nr_rings; i++) {
5911                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5912                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5913                 u32 grp_idx = rxr->bnapi->index;
5914
5915                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5916                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5917
5918                         hwrm_ring_free_send_msg(bp, ring,
5919                                                 RING_FREE_REQ_RING_TYPE_RX,
5920                                                 close_path ? cmpl_ring_id :
5921                                                 INVALID_HW_RING_ID);
5922                         ring->fw_ring_id = INVALID_HW_RING_ID;
5923                         bp->grp_info[grp_idx].rx_fw_ring_id =
5924                                 INVALID_HW_RING_ID;
5925                 }
5926         }
5927
5928         if (bp->flags & BNXT_FLAG_CHIP_P5)
5929                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5930         else
5931                 type = RING_FREE_REQ_RING_TYPE_RX;
5932         for (i = 0; i < bp->rx_nr_rings; i++) {
5933                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5934                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5935                 u32 grp_idx = rxr->bnapi->index;
5936
5937                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5938                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5939
5940                         hwrm_ring_free_send_msg(bp, ring, type,
5941                                                 close_path ? cmpl_ring_id :
5942                                                 INVALID_HW_RING_ID);
5943                         ring->fw_ring_id = INVALID_HW_RING_ID;
5944                         bp->grp_info[grp_idx].agg_fw_ring_id =
5945                                 INVALID_HW_RING_ID;
5946                 }
5947         }
5948
5949         /* The completion rings are about to be freed.  After that the
5950          * IRQ doorbell will not work anymore.  So we need to disable
5951          * IRQ here.
5952          */
5953         bnxt_disable_int_sync(bp);
5954
5955         if (bp->flags & BNXT_FLAG_CHIP_P5)
5956                 type = RING_FREE_REQ_RING_TYPE_NQ;
5957         else
5958                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5959         for (i = 0; i < bp->cp_nr_rings; i++) {
5960                 struct bnxt_napi *bnapi = bp->bnapi[i];
5961                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5962                 struct bnxt_ring_struct *ring;
5963                 int j;
5964
5965                 for (j = 0; j < 2; j++) {
5966                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5967
5968                         if (cpr2) {
5969                                 ring = &cpr2->cp_ring_struct;
5970                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5971                                         continue;
5972                                 hwrm_ring_free_send_msg(bp, ring,
5973                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5974                                         INVALID_HW_RING_ID);
5975                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5976                         }
5977                 }
5978                 ring = &cpr->cp_ring_struct;
5979                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5980                         hwrm_ring_free_send_msg(bp, ring, type,
5981                                                 INVALID_HW_RING_ID);
5982                         ring->fw_ring_id = INVALID_HW_RING_ID;
5983                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5984                 }
5985         }
5986 }
5987
5988 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5989                            bool shared);
5990
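/* Read back the resources currently reserved for this function (rings,
 * ring groups, VNICs, stat contexts, IRQs) via HWRM_FUNC_QCFG and update
 * bp->hw_resc.
 */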
5991 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5992 {
5993         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5994         struct hwrm_func_qcfg_output *resp;
5995         struct hwrm_func_qcfg_input *req;
5996         int rc;
5997
5998         if (bp->hwrm_spec_code < 0x10601)
5999                 return 0;
6000
6001         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6002         if (rc)
6003                 return rc;
6004
6005         req->fid = cpu_to_le16(0xffff);
6006         resp = hwrm_req_hold(bp, req);
6007         rc = hwrm_req_send(bp, req);
6008         if (rc) {
6009                 hwrm_req_drop(bp, req);
6010                 return rc;
6011         }
6012
6013         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6014         if (BNXT_NEW_RM(bp)) {
6015                 u16 cp, stats;
6016
6017                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6018                 hw_resc->resv_hw_ring_grps =
6019                         le32_to_cpu(resp->alloc_hw_ring_grps);
6020                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6021                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6022                 stats = le16_to_cpu(resp->alloc_stat_ctx);
6023                 hw_resc->resv_irqs = cp;
6024                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6025                         int rx = hw_resc->resv_rx_rings;
6026                         int tx = hw_resc->resv_tx_rings;
6027
6028                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6029                                 rx >>= 1;
6030                         if (cp < (rx + tx)) {
6031                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6032                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6033                                         rx <<= 1;
6034                                 hw_resc->resv_rx_rings = rx;
6035                                 hw_resc->resv_tx_rings = tx;
6036                         }
6037                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6038                         hw_resc->resv_hw_ring_grps = rx;
6039                 }
6040                 hw_resc->resv_cp_rings = cp;
6041                 hw_resc->resv_stat_ctxs = stats;
6042         }
6043         hwrm_req_drop(bp, req);
6044         return 0;
6045 }
6046
6047 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6048 {
6049         struct hwrm_func_qcfg_output *resp;
6050         struct hwrm_func_qcfg_input *req;
6051         int rc;
6052
6053         if (bp->hwrm_spec_code < 0x10601)
6054                 return 0;
6055
6056         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6057         if (rc)
6058                 return rc;
6059
6060         req->fid = cpu_to_le16(fid);
6061         resp = hwrm_req_hold(bp, req);
6062         rc = hwrm_req_send(bp, req);
6063         if (!rc)
6064                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6065
6066         hwrm_req_drop(bp, req);
6067         return rc;
6068 }
6069
6070 static bool bnxt_rfs_supported(struct bnxt *bp);
6071
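/* Build, but do not send, a FUNC_CFG request reserving the given PF
 * resources.  Returns the prepared request, or NULL on failure.
 */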
6072 static struct hwrm_func_cfg_input *
6073 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6074                              int ring_grps, int cp_rings, int stats, int vnics)
6075 {
6076         struct hwrm_func_cfg_input *req;
6077         u32 enables = 0;
6078
6079         if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6080                 return NULL;
6081
6082         req->fid = cpu_to_le16(0xffff);
6083         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6084         req->num_tx_rings = cpu_to_le16(tx_rings);
6085         if (BNXT_NEW_RM(bp)) {
6086                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6087                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6088                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6089                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6090                         enables |= tx_rings + ring_grps ?
6091                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6092                         enables |= rx_rings ?
6093                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6094                 } else {
6095                         enables |= cp_rings ?
6096                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6097                         enables |= ring_grps ?
6098                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6099                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6100                 }
6101                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6102
6103                 req->num_rx_rings = cpu_to_le16(rx_rings);
6104                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6105                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6106                         req->num_msix = cpu_to_le16(cp_rings);
6107                         req->num_rsscos_ctxs =
6108                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6109                 } else {
6110                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6111                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6112                         req->num_rsscos_ctxs = cpu_to_le16(1);
6113                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6114                             bnxt_rfs_supported(bp))
6115                                 req->num_rsscos_ctxs =
6116                                         cpu_to_le16(ring_grps + 1);
6117                 }
6118                 req->num_stat_ctxs = cpu_to_le16(stats);
6119                 req->num_vnics = cpu_to_le16(vnics);
6120         }
6121         req->enables = cpu_to_le32(enables);
6122         return req;
6123 }
6124
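/* Build, but do not send, a FUNC_VF_CFG request reserving the given VF
 * resources.  Returns the prepared request, or NULL on failure.
 */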
6125 static struct hwrm_func_vf_cfg_input *
6126 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6127                              int ring_grps, int cp_rings, int stats, int vnics)
6128 {
6129         struct hwrm_func_vf_cfg_input *req;
6130         u32 enables = 0;
6131
6132         if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6133                 return NULL;
6134
6135         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6136         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6137                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6138         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6139         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6140                 enables |= tx_rings + ring_grps ?
6141                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6142         } else {
6143                 enables |= cp_rings ?
6144                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6145                 enables |= ring_grps ?
6146                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6147         }
6148         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6149         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6150
6151         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6152         req->num_tx_rings = cpu_to_le16(tx_rings);
6153         req->num_rx_rings = cpu_to_le16(rx_rings);
6154         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6155                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6156                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6157         } else {
6158                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6159                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6160                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6161         }
6162         req->num_stat_ctxs = cpu_to_le16(stats);
6163         req->num_vnics = cpu_to_le16(vnics);
6164
6165         req->enables = cpu_to_le32(enables);
6166         return req;
6167 }
6168
6169 static int
6170 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6171                            int ring_grps, int cp_rings, int stats, int vnics)
6172 {
6173         struct hwrm_func_cfg_input *req;
6174         int rc;
6175
6176         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6177                                            cp_rings, stats, vnics);
6178         if (!req)
6179                 return -ENOMEM;
6180
6181         if (!req->enables) {
6182                 hwrm_req_drop(bp, req);
6183                 return 0;
6184         }
6185
6186         rc = hwrm_req_send(bp, req);
6187         if (rc)
6188                 return rc;
6189
6190         if (bp->hwrm_spec_code < 0x10601)
6191                 bp->hw_resc.resv_tx_rings = tx_rings;
6192
6193         return bnxt_hwrm_get_rings(bp);
6194 }
6195
6196 static int
6197 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6198                            int ring_grps, int cp_rings, int stats, int vnics)
6199 {
6200         struct hwrm_func_vf_cfg_input *req;
6201         int rc;
6202
6203         if (!BNXT_NEW_RM(bp)) {
6204                 bp->hw_resc.resv_tx_rings = tx_rings;
6205                 return 0;
6206         }
6207
6208         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6209                                            cp_rings, stats, vnics);
6210         if (!req)
6211                 return -ENOMEM;
6212
6213         rc = hwrm_req_send(bp, req);
6214         if (rc)
6215                 return rc;
6216
6217         return bnxt_hwrm_get_rings(bp);
6218 }
6219
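/* Reserve rings and related resources with the firmware, using the PF or
 * VF variant as appropriate.
 */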
6220 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6221                                    int cp, int stat, int vnic)
6222 {
6223         if (BNXT_PF(bp))
6224                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6225                                                   vnic);
6226         else
6227                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6228                                                   vnic);
6229 }
6230
6231 int bnxt_nq_rings_in_use(struct bnxt *bp)
6232 {
6233         int cp = bp->cp_nr_rings;
6234         int ulp_msix, ulp_base;
6235
6236         ulp_msix = bnxt_get_ulp_msix_num(bp);
6237         if (ulp_msix) {
6238                 ulp_base = bnxt_get_ulp_msix_base(bp);
6239                 cp += ulp_msix;
6240                 if ((ulp_base + ulp_msix) > cp)
6241                         cp = ulp_base + ulp_msix;
6242         }
6243         return cp;
6244 }
6245
6246 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6247 {
6248         int cp;
6249
6250         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6251                 return bnxt_nq_rings_in_use(bp);
6252
6253         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6254         return cp;
6255 }
6256
6257 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6258 {
6259         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6260         int cp = bp->cp_nr_rings;
6261
6262         if (!ulp_stat)
6263                 return cp;
6264
6265         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6266                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6267
6268         return cp + ulp_stat;
6269 }
6270
6271 /* Check if a default RSS map needs to be set up.  This function is only
6272  * used on older firmware that does not require reserving RX rings.
6273  */
6274 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6275 {
6276         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6277
6278         /* The RSS map is valid for RX rings set to resv_rx_rings */
6279         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6280                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6281                 if (!netif_is_rxfh_configured(bp->dev))
6282                         bnxt_set_dflt_rss_indir_tbl(bp);
6283         }
6284 }
6285
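/* Return true if the rings/VNICs/stat contexts the driver wants differ
 * from what is currently reserved with the firmware.
 */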
6286 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6287 {
6288         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6289         int cp = bnxt_cp_rings_in_use(bp);
6290         int nq = bnxt_nq_rings_in_use(bp);
6291         int rx = bp->rx_nr_rings, stat;
6292         int vnic = 1, grp = rx;
6293
6294         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6295             bp->hwrm_spec_code >= 0x10601)
6296                 return true;
6297
6298         /* Old firmware does not need RX ring reservations but we still
6299          * need to set up a default RSS map when needed.  With new firmware
6300          * we go through RX ring reservations first and then set up the
6301          * RSS map for the successfully reserved RX rings when needed.
6302          */
6303         if (!BNXT_NEW_RM(bp)) {
6304                 bnxt_check_rss_tbl_no_rmgr(bp);
6305                 return false;
6306         }
6307         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6308                 vnic = rx + 1;
6309         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6310                 rx <<= 1;
6311         stat = bnxt_get_func_stat_ctxs(bp);
6312         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6313             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6314             (hw_resc->resv_hw_ring_grps != grp &&
6315              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6316                 return true;
6317         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6318             hw_resc->resv_irqs != nq)
6319                 return true;
6320         return false;
6321 }
6322
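/* Reserve rings with the firmware and trim the driver's ring counts down
 * to what was actually granted, falling back to a default RSS indirection
 * table when necessary.
 */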
6323 static int __bnxt_reserve_rings(struct bnxt *bp)
6324 {
6325         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6326         int cp = bnxt_nq_rings_in_use(bp);
6327         int tx = bp->tx_nr_rings;
6328         int rx = bp->rx_nr_rings;
6329         int grp, rx_rings, rc;
6330         int vnic = 1, stat;
6331         bool sh = false;
6332
6333         if (!bnxt_need_reserve_rings(bp))
6334                 return 0;
6335
6336         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6337                 sh = true;
6338         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6339                 vnic = rx + 1;
6340         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6341                 rx <<= 1;
6342         grp = bp->rx_nr_rings;
6343         stat = bnxt_get_func_stat_ctxs(bp);
6344
6345         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6346         if (rc)
6347                 return rc;
6348
6349         tx = hw_resc->resv_tx_rings;
6350         if (BNXT_NEW_RM(bp)) {
6351                 rx = hw_resc->resv_rx_rings;
6352                 cp = hw_resc->resv_irqs;
6353                 grp = hw_resc->resv_hw_ring_grps;
6354                 vnic = hw_resc->resv_vnics;
6355                 stat = hw_resc->resv_stat_ctxs;
6356         }
6357
6358         rx_rings = rx;
6359         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6360                 if (rx >= 2) {
6361                         rx_rings = rx >> 1;
6362                 } else {
6363                         if (netif_running(bp->dev))
6364                                 return -ENOMEM;
6365
6366                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6367                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6368                         bp->dev->hw_features &= ~NETIF_F_LRO;
6369                         bp->dev->features &= ~NETIF_F_LRO;
6370                         bnxt_set_ring_params(bp);
6371                 }
6372         }
6373         rx_rings = min_t(int, rx_rings, grp);
6374         cp = min_t(int, cp, bp->cp_nr_rings);
6375         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6376                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6377         cp = min_t(int, cp, stat);
6378         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6379         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6380                 rx = rx_rings << 1;
6381         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6382         bp->tx_nr_rings = tx;
6383
6384         /* If we cannot reserve all the RX rings, reset the RSS map only
6385          * if absolutely necessary
6386          */
6387         if (rx_rings != bp->rx_nr_rings) {
6388                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6389                             rx_rings, bp->rx_nr_rings);
6390                 if (netif_is_rxfh_configured(bp->dev) &&
6391                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6392                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6393                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6394                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6395                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6396                 }
6397         }
6398         bp->rx_nr_rings = rx_rings;
6399         bp->cp_nr_rings = cp;
6400
6401         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6402                 return -ENOMEM;
6403
6404         if (!netif_is_rxfh_configured(bp->dev))
6405                 bnxt_set_dflt_rss_indir_tbl(bp);
6406
6407         return rc;
6408 }
6409
6410 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6411                                     int ring_grps, int cp_rings, int stats,
6412                                     int vnics)
6413 {
6414         struct hwrm_func_vf_cfg_input *req;
6415         u32 flags;
6416
6417         if (!BNXT_NEW_RM(bp))
6418                 return 0;
6419
6420         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6421                                            cp_rings, stats, vnics);
        if (!req)
                return -ENOMEM;
6422         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6423                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6424                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6425                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6426                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6427                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6428         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6429                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6430
6431         req->flags = cpu_to_le32(flags);
6432         return hwrm_req_send_silent(bp, req);
6433 }
6434
6435 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6436                                     int ring_grps, int cp_rings, int stats,
6437                                     int vnics)
6438 {
6439         struct hwrm_func_cfg_input *req;
6440         u32 flags;
6441
6442         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6443                                            cp_rings, stats, vnics);
        if (!req)
                return -ENOMEM;
6444         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6445         if (BNXT_NEW_RM(bp)) {
6446                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6447                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6448                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6449                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6450                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6451                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6452                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6453                 else
6454                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6455         }
6456
6457         req->flags = cpu_to_le32(flags);
6458         return hwrm_req_send_silent(bp, req);
6459 }
6460
6461 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6462                                  int ring_grps, int cp_rings, int stats,
6463                                  int vnics)
6464 {
6465         if (bp->hwrm_spec_code < 0x10801)
6466                 return 0;
6467
6468         if (BNXT_PF(bp))
6469                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6470                                                 ring_grps, cp_rings, stats,
6471                                                 vnics);
6472
6473         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6474                                         cp_rings, stats, vnics);
6475 }
6476
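/* Query the interrupt coalescing capabilities (HWRM_RING_AGGINT_QCAPS)
 * and cache them in bp->coal_cap; older firmware gets legacy defaults.
 */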
6477 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6478 {
6479         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6480         struct hwrm_ring_aggint_qcaps_output *resp;
6481         struct hwrm_ring_aggint_qcaps_input *req;
6482         int rc;
6483
6484         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6485         coal_cap->num_cmpl_dma_aggr_max = 63;
6486         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6487         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6488         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6489         coal_cap->int_lat_tmr_min_max = 65535;
6490         coal_cap->int_lat_tmr_max_max = 65535;
6491         coal_cap->num_cmpl_aggr_int_max = 65535;
6492         coal_cap->timer_units = 80;
6493
6494         if (bp->hwrm_spec_code < 0x10902)
6495                 return;
6496
6497         if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6498                 return;
6499
6500         resp = hwrm_req_hold(bp, req);
6501         rc = hwrm_req_send_silent(bp, req);
6502         if (!rc) {
6503                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6504                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6505                 coal_cap->num_cmpl_dma_aggr_max =
6506                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6507                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6508                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6509                 coal_cap->cmpl_aggr_dma_tmr_max =
6510                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6511                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6512                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6513                 coal_cap->int_lat_tmr_min_max =
6514                         le16_to_cpu(resp->int_lat_tmr_min_max);
6515                 coal_cap->int_lat_tmr_max_max =
6516                         le16_to_cpu(resp->int_lat_tmr_max_max);
6517                 coal_cap->num_cmpl_aggr_int_max =
6518                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6519                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6520         }
6521         hwrm_req_drop(bp, req);
6522 }
6523
6524 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6525 {
6526         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6527
6528         return usec * 1000 / coal_cap->timer_units;
6529 }
6530
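/* Translate the generic coalescing settings in @hw_coal into a
 * RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping each value to the
 * limits advertised by the firmware.
 */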
6531 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6532         struct bnxt_coal *hw_coal,
6533         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6534 {
6535         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6536         u16 val, tmr, max, flags = hw_coal->flags;
6537         u32 cmpl_params = coal_cap->cmpl_params;
6538
6539         max = hw_coal->bufs_per_record * 128;
6540         if (hw_coal->budget)
6541                 max = hw_coal->bufs_per_record * hw_coal->budget;
6542         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6543
6544         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6545         req->num_cmpl_aggr_int = cpu_to_le16(val);
6546
6547         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6548         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6549
6550         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6551                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6552         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6553
6554         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6555         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6556         req->int_lat_tmr_max = cpu_to_le16(tmr);
6557
6558         /* min timer set to 1/2 of interrupt timer */
6559         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6560                 val = tmr / 2;
6561                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6562                 req->int_lat_tmr_min = cpu_to_le16(val);
6563                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6564         }
6565
6566         /* buf timer set to 1/4 of interrupt timer */
6567         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6568         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6569
6570         if (cmpl_params &
6571             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6572                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6573                 val = clamp_t(u16, tmr, 1,
6574                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6575                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6576                 req->enables |=
6577                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6578         }
6579
6580         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6581             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6582                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6583         req->flags = cpu_to_le16(flags);
6584         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6585 }
6586
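/* Program the minimum interrupt latency timer on a notification queue,
 * using half of the ring's coalescing tick value.
 */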
6587 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6588                                    struct bnxt_coal *hw_coal)
6589 {
6590         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6591         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6592         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6593         u32 nq_params = coal_cap->nq_params;
6594         u16 tmr;
6595         int rc;
6596
6597         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6598                 return 0;
6599
6600         rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6601         if (rc)
6602                 return rc;
6603
6604         req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6605         req->flags =
6606                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6607
6608         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6609         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6610         req->int_lat_tmr_min = cpu_to_le16(tmr);
6611         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6612         return hwrm_req_send(bp, req);
6613 }
6614
6615 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6616 {
6617         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6618         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6619         struct bnxt_coal coal;
6620         int rc;
6621
6622         /* Tick values in microseconds.
6623          * 1 coal_buf x bufs_per_record = 1 completion record.
6624          */
6625         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6626
6627         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6628         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6629
6630         if (!bnapi->rx_ring)
6631                 return -ENODEV;
6632
6633         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6634         if (rc)
6635                 return rc;
6636
6637         bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6638
6639         req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6640
6641         return hwrm_req_send(bp, req_rx);
6642 }
6643
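/* Apply the RX and TX coalescing settings to every completion ring; on
 * P5 chips also program the per-NQ timer.
 */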
6644 int bnxt_hwrm_set_coal(struct bnxt *bp)
6645 {
6646         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6647                                                            *req;
6648         int i, rc;
6649
6650         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6651         if (rc)
6652                 return rc;
6653
6654         rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6655         if (rc) {
6656                 hwrm_req_drop(bp, req_rx);
6657                 return rc;
6658         }
6659
6660         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6661         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6662
6663         hwrm_req_hold(bp, req_rx);
6664         hwrm_req_hold(bp, req_tx);
6665         for (i = 0; i < bp->cp_nr_rings; i++) {
6666                 struct bnxt_napi *bnapi = bp->bnapi[i];
6667                 struct bnxt_coal *hw_coal;
6668                 u16 ring_id;
6669
6670                 req = req_rx;
6671                 if (!bnapi->rx_ring) {
6672                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6673                         req = req_tx;
6674                 } else {
6675                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6676                 }
6677                 req->ring_id = cpu_to_le16(ring_id);
6678
6679                 rc = hwrm_req_send(bp, req);
6680                 if (rc)
6681                         break;
6682
6683                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6684                         continue;
6685
6686                 if (bnapi->rx_ring && bnapi->tx_ring) {
6687                         req = req_tx;
6688                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6689                         req->ring_id = cpu_to_le16(ring_id);
6690                         rc = hwrm_req_send(bp, req);
6691                         if (rc)
6692                                 break;
6693                 }
6694                 if (bnapi->rx_ring)
6695                         hw_coal = &bp->rx_coal;
6696                 else
6697                         hw_coal = &bp->tx_coal;
6698                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6699         }
6700         hwrm_req_drop(bp, req_rx);
6701         hwrm_req_drop(bp, req_tx);
6702         return rc;
6703 }
6704
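/* Free all hardware statistics contexts.  On older firmware (major
 * version <= 20), each context's stats are explicitly cleared before the
 * context is freed.
 */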
6705 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6706 {
6707         struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6708         struct hwrm_stat_ctx_free_input *req;
6709         int i;
6710
6711         if (!bp->bnapi)
6712                 return;
6713
6714         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6715                 return;
6716
6717         if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6718                 return;
6719         if (BNXT_FW_MAJ(bp) <= 20) {
6720                 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6721                         hwrm_req_drop(bp, req);
6722                         return;
6723                 }
6724                 hwrm_req_hold(bp, req0);
6725         }
6726         hwrm_req_hold(bp, req);
6727         for (i = 0; i < bp->cp_nr_rings; i++) {
6728                 struct bnxt_napi *bnapi = bp->bnapi[i];
6729                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6730
6731                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6732                         req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6733                         if (req0) {
6734                                 req0->stat_ctx_id = req->stat_ctx_id;
6735                                 hwrm_req_send(bp, req0);
6736                         }
6737                         hwrm_req_send(bp, req);
6738
6739                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6740                 }
6741         }
6742         hwrm_req_drop(bp, req);
6743         if (req0)
6744                 hwrm_req_drop(bp, req0);
6745 }
6746
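/* Allocate one hardware statistics context per completion ring and record
 * the firmware context IDs in the ring group info.
 */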
6747 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6748 {
6749         struct hwrm_stat_ctx_alloc_output *resp;
6750         struct hwrm_stat_ctx_alloc_input *req;
6751         int rc, i;
6752
6753         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6754                 return 0;
6755
6756         rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6757         if (rc)
6758                 return rc;
6759
6760         req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6761         req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6762
6763         resp = hwrm_req_hold(bp, req);
6764         for (i = 0; i < bp->cp_nr_rings; i++) {
6765                 struct bnxt_napi *bnapi = bp->bnapi[i];
6766                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6767
6768                 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6769
6770                 rc = hwrm_req_send(bp, req);
6771                 if (rc)
6772                         break;
6773
6774                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6775
6776                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6777         }
6778         hwrm_req_drop(bp, req);
6779         return rc;
6780 }
6781
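/* Query the function configuration (HWRM_FUNC_QCFG): VF VLAN, DCBX/LLDP
 * agent capabilities, NPAR partition type, bridge mode, maximum MTU and
 * the L2 doorbell BAR size.
 */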
6782 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6783 {
6784         struct hwrm_func_qcfg_output *resp;
6785         struct hwrm_func_qcfg_input *req;
6786         u32 min_db_offset = 0;
6787         u16 flags;
6788         int rc;
6789
6790         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6791         if (rc)
6792                 return rc;
6793
6794         req->fid = cpu_to_le16(0xffff);
6795         resp = hwrm_req_hold(bp, req);
6796         rc = hwrm_req_send(bp, req);
6797         if (rc)
6798                 goto func_qcfg_exit;
6799
6800 #ifdef CONFIG_BNXT_SRIOV
6801         if (BNXT_VF(bp)) {
6802                 struct bnxt_vf_info *vf = &bp->vf;
6803
6804                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6805         } else {
6806                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6807         }
6808 #endif
6809         flags = le16_to_cpu(resp->flags);
6810         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6811                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6812                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6813                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6814                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6815         }
6816         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6817                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6818         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6819                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6820
6821         switch (resp->port_partition_type) {
6822         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6823         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6824         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6825                 bp->port_partition_type = resp->port_partition_type;
6826                 break;
6827         }
6828         if (bp->hwrm_spec_code < 0x10707 ||
6829             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6830                 bp->br_mode = BRIDGE_MODE_VEB;
6831         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6832                 bp->br_mode = BRIDGE_MODE_VEPA;
6833         else
6834                 bp->br_mode = BRIDGE_MODE_UNDEF;
6835
6836         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6837         if (!bp->max_mtu)
6838                 bp->max_mtu = BNXT_MAX_MTU;
6839
6840         if (bp->db_size)
6841                 goto func_qcfg_exit;
6842
6843         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6844                 if (BNXT_PF(bp))
6845                         min_db_offset = DB_PF_OFFSET_P5;
6846                 else
6847                         min_db_offset = DB_VF_OFFSET_P5;
6848         }
6849         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6850                                  1024);
6851         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6852             bp->db_size <= min_db_offset)
6853                 bp->db_size = pci_resource_len(bp->pdev, 2);
6854
6855 func_qcfg_exit:
6856         hwrm_req_drop(bp, req);
6857         return rc;
6858 }
6859
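/* Record the context-memory init value and per-type init offsets reported
 * by FUNC_BACKING_STORE_QCAPS.  Offsets in the response are scaled by 4 to
 * byte offsets; entries not covered by ctx_init_mask keep an invalid
 * offset.
 */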
6860 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6861                         struct hwrm_func_backing_store_qcaps_output *resp)
6862 {
6863         struct bnxt_mem_init *mem_init;
6864         u16 init_mask;
6865         u8 init_val;
6866         u8 *offset;
6867         int i;
6868
6869         init_val = resp->ctx_kind_initializer;
6870         init_mask = le16_to_cpu(resp->ctx_init_mask);
6871         offset = &resp->qp_init_offset;
6872         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6873         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6874                 mem_init->init_val = init_val;
6875                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6876                 if (!init_mask)
6877                         continue;
6878                 if (i == BNXT_CTX_MEM_INIT_STAT)
6879                         offset = &resp->stat_init_offset;
6880                 if (init_mask & (1 << i))
6881                         mem_init->offset = *offset * 4;
6882                 else
6883                         mem_init->init_val = 0;
6884         }
6885         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6886         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6887         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6888         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6889         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6890         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6891 }
6892
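/* Query backing-store capabilities (entry sizes and entry limits for the
 * QP, SRQ, CQ, VNIC, STAT, TQM, MRAV and TIM context types) and allocate
 * bp->ctx to hold them.  A firmware that does not support the command is
 * not treated as an error.
 */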
6893 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6894 {
6895         struct hwrm_func_backing_store_qcaps_output *resp;
6896         struct hwrm_func_backing_store_qcaps_input *req;
6897         int rc;
6898
6899         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6900                 return 0;
6901
6902         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6903         if (rc)
6904                 return rc;
6905
6906         resp = hwrm_req_hold(bp, req);
6907         rc = hwrm_req_send_silent(bp, req);
6908         if (!rc) {
6909                 struct bnxt_ctx_pg_info *ctx_pg;
6910                 struct bnxt_ctx_mem_info *ctx;
6911                 int i, tqm_rings;
6912
6913                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6914                 if (!ctx) {
6915                         rc = -ENOMEM;
6916                         goto ctx_err;
6917                 }
6918                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6919                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6920                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6921                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6922                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6923                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6924                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6925                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6926                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6927                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6928                 ctx->vnic_max_vnic_entries =
6929                         le16_to_cpu(resp->vnic_max_vnic_entries);
6930                 ctx->vnic_max_ring_table_entries =
6931                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6932                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6933                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6934                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6935                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6936                 ctx->tqm_min_entries_per_ring =
6937                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6938                 ctx->tqm_max_entries_per_ring =
6939                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6940                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6941                 if (!ctx->tqm_entries_multiple)
6942                         ctx->tqm_entries_multiple = 1;
6943                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6944                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6945                 ctx->mrav_num_entries_units =
6946                         le16_to_cpu(resp->mrav_num_entries_units);
6947                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6948                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6949
6950                 bnxt_init_ctx_initializer(ctx, resp);
6951
6952                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6953                 if (!ctx->tqm_fp_rings_count)
6954                         ctx->tqm_fp_rings_count = bp->max_q;
6955                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6956                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6957
6958                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6959                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6960                 if (!ctx_pg) {
6961                         kfree(ctx);
6962                         rc = -ENOMEM;
6963                         goto ctx_err;
6964                 }
6965                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6966                         ctx->tqm_mem[i] = ctx_pg;
6967                 bp->ctx = ctx;
6968         } else {
6969                 rc = 0;
6970         }
6971 ctx_err:
6972         hwrm_req_drop(bp, req);
6973         return rc;
6974 }
6975
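/* Encode the ring's page-table depth into the low bits of the page
 * attribute field (1 or 2 levels of indirection, 0 for none) and point the
 * page directory at either the page table or the single data page.
 */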
6976 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6977                                   __le64 *pg_dir)
6978 {
6979         if (!rmem->nr_pages)
6980                 return;
6981
6982         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6983         if (rmem->depth >= 1) {
6984                 if (rmem->depth == 2)
6985                         *pg_attr |= 2;
6986                 else
6987                         *pg_attr |= 1;
6988                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6989         } else {
6990                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6991         }
6992 }
6993
6994 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6995         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6996          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6997          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6998          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6999          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7000
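/* Report the host backing-store layout to firmware.  Only the context
 * regions selected in @enables are filled in; if the extended request does
 * not fit the firmware's maximum request size, the legacy request length
 * is used instead.
 */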
7001 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7002 {
7003         struct hwrm_func_backing_store_cfg_input *req;
7004         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7005         struct bnxt_ctx_pg_info *ctx_pg;
7006         void **__req = (void **)&req;
7007         u32 req_len = sizeof(*req);
7008         __le32 *num_entries;
7009         __le64 *pg_dir;
7010         u32 flags = 0;
7011         u8 *pg_attr;
7012         u32 ena;
7013         int rc;
7014         int i;
7015
7016         if (!ctx)
7017                 return 0;
7018
7019         if (req_len > bp->hwrm_max_ext_req_len)
7020                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7021         rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7022         if (rc)
7023                 return rc;
7024
7025         req->enables = cpu_to_le32(enables);
7026         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7027                 ctx_pg = &ctx->qp_mem;
7028                 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7029                 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7030                 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7031                 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7032                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7033                                       &req->qpc_pg_size_qpc_lvl,
7034                                       &req->qpc_page_dir);
7035         }
7036         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7037                 ctx_pg = &ctx->srq_mem;
7038                 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7039                 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7040                 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7041                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7042                                       &req->srq_pg_size_srq_lvl,
7043                                       &req->srq_page_dir);
7044         }
7045         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7046                 ctx_pg = &ctx->cq_mem;
7047                 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7048                 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7049                 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7050                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7051                                       &req->cq_pg_size_cq_lvl,
7052                                       &req->cq_page_dir);
7053         }
7054         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7055                 ctx_pg = &ctx->vnic_mem;
7056                 req->vnic_num_vnic_entries =
7057                         cpu_to_le16(ctx->vnic_max_vnic_entries);
7058                 req->vnic_num_ring_table_entries =
7059                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
7060                 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7061                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7062                                       &req->vnic_pg_size_vnic_lvl,
7063                                       &req->vnic_page_dir);
7064         }
7065         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7066                 ctx_pg = &ctx->stat_mem;
7067                 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7068                 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7069                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7070                                       &req->stat_pg_size_stat_lvl,
7071                                       &req->stat_page_dir);
7072         }
7073         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7074                 ctx_pg = &ctx->mrav_mem;
7075                 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7076                 if (ctx->mrav_num_entries_units)
7077                         flags |=
7078                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7079                 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7080                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7081                                       &req->mrav_pg_size_mrav_lvl,
7082                                       &req->mrav_page_dir);
7083         }
7084         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7085                 ctx_pg = &ctx->tim_mem;
7086                 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7087                 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7088                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7089                                       &req->tim_pg_size_tim_lvl,
7090                                       &req->tim_page_dir);
7091         }
7092         for (i = 0, num_entries = &req->tqm_sp_num_entries,
7093              pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7094              pg_dir = &req->tqm_sp_page_dir,
7095              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7096              i < BNXT_MAX_TQM_RINGS;
7097              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7098                 if (!(enables & ena))
7099                         continue;
7100
7101                 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7102                 ctx_pg = ctx->tqm_mem[i];
7103                 *num_entries = cpu_to_le32(ctx_pg->entries);
7104                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7105         }
7106         req->flags = cpu_to_le32(flags);
7107         return hwrm_req_send(bp, req);
7108 }
7109
7110 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7111                                   struct bnxt_ctx_pg_info *ctx_pg)
7112 {
7113         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7114
7115         rmem->page_size = BNXT_PAGE_SIZE;
7116         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7117         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7118         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7119         if (rmem->depth >= 1)
7120                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7121         return bnxt_alloc_ring(bp, rmem);
7122 }
7123
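/* Allocate the rings that back one context region.  Regions needing more
 * than MAX_CTX_PAGES pages (or a requested depth above 1) use a two-level
 * layout: one ring of page-directory pages plus one ring per directory
 * entry, with the last ring sized to the remaining pages.
 */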
7124 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7125                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7126                                   u8 depth, struct bnxt_mem_init *mem_init)
7127 {
7128         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7129         int rc;
7130
7131         if (!mem_size)
7132                 return -EINVAL;
7133
7134         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7135         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7136                 ctx_pg->nr_pages = 0;
7137                 return -EINVAL;
7138         }
7139         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7140                 int nr_tbls, i;
7141
7142                 rmem->depth = 2;
7143                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7144                                              GFP_KERNEL);
7145                 if (!ctx_pg->ctx_pg_tbl)
7146                         return -ENOMEM;
7147                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7148                 rmem->nr_pages = nr_tbls;
7149                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7150                 if (rc)
7151                         return rc;
7152                 for (i = 0; i < nr_tbls; i++) {
7153                         struct bnxt_ctx_pg_info *pg_tbl;
7154
7155                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7156                         if (!pg_tbl)
7157                                 return -ENOMEM;
7158                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7159                         rmem = &pg_tbl->ring_mem;
7160                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7161                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7162                         rmem->depth = 1;
7163                         rmem->nr_pages = MAX_CTX_PAGES;
7164                         rmem->mem_init = mem_init;
7165                         if (i == (nr_tbls - 1)) {
7166                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7167
7168                                 if (rem)
7169                                         rmem->nr_pages = rem;
7170                         }
7171                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7172                         if (rc)
7173                                 break;
7174                 }
7175         } else {
7176                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7177                 if (rmem->nr_pages > 1 || depth)
7178                         rmem->depth = 1;
7179                 rmem->mem_init = mem_init;
7180                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7181         }
7182         return rc;
7183 }
7184
7185 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7186                                   struct bnxt_ctx_pg_info *ctx_pg)
7187 {
7188         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7189
7190         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7191             ctx_pg->ctx_pg_tbl) {
7192                 int i, nr_tbls = rmem->nr_pages;
7193
7194                 for (i = 0; i < nr_tbls; i++) {
7195                         struct bnxt_ctx_pg_info *pg_tbl;
7196                         struct bnxt_ring_mem_info *rmem2;
7197
7198                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7199                         if (!pg_tbl)
7200                                 continue;
7201                         rmem2 = &pg_tbl->ring_mem;
7202                         bnxt_free_ring(bp, rmem2);
7203                         ctx_pg->ctx_pg_arr[i] = NULL;
7204                         kfree(pg_tbl);
7205                         ctx_pg->ctx_pg_tbl[i] = NULL;
7206                 }
7207                 kfree(ctx_pg->ctx_pg_tbl);
7208                 ctx_pg->ctx_pg_tbl = NULL;
7209         }
7210         bnxt_free_ring(bp, rmem);
7211         ctx_pg->nr_pages = 0;
7212 }
7213
7214 void bnxt_free_ctx_mem(struct bnxt *bp)
7215 {
7216         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7217         int i;
7218
7219         if (!ctx)
7220                 return;
7221
7222         if (ctx->tqm_mem[0]) {
7223                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7224                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7225                 kfree(ctx->tqm_mem[0]);
7226                 ctx->tqm_mem[0] = NULL;
7227         }
7228
7229         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7230         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7231         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7232         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7233         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7234         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7235         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7236         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7237 }
7238
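/* Size and allocate every context backing-store region from the queried
 * capabilities and then pass the layout to firmware.  Extra QP and SRQ
 * entries are reserved when RoCE is supported and this is not a kdump
 * kernel; the MRAV and TIM regions are skipped entirely without RoCE.
 */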
7239 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7240 {
7241         struct bnxt_ctx_pg_info *ctx_pg;
7242         struct bnxt_ctx_mem_info *ctx;
7243         struct bnxt_mem_init *init;
7244         u32 mem_size, ena, entries;
7245         u32 entries_sp, min;
7246         u32 num_mr, num_ah;
7247         u32 extra_srqs = 0;
7248         u32 extra_qps = 0;
7249         u8 pg_lvl = 1;
7250         int i, rc;
7251
7252         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7253         if (rc) {
7254                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7255                            rc);
7256                 return rc;
7257         }
7258         ctx = bp->ctx;
7259         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7260                 return 0;
7261
7262         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7263                 pg_lvl = 2;
7264                 extra_qps = 65536;
7265                 extra_srqs = 8192;
7266         }
7267
7268         ctx_pg = &ctx->qp_mem;
7269         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7270                           extra_qps;
7271         if (ctx->qp_entry_size) {
7272                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7273                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7274                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7275                 if (rc)
7276                         return rc;
7277         }
7278
7279         ctx_pg = &ctx->srq_mem;
7280         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7281         if (ctx->srq_entry_size) {
7282                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7283                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7284                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7285                 if (rc)
7286                         return rc;
7287         }
7288
7289         ctx_pg = &ctx->cq_mem;
7290         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7291         if (ctx->cq_entry_size) {
7292                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7293                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7294                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7295                 if (rc)
7296                         return rc;
7297         }
7298
7299         ctx_pg = &ctx->vnic_mem;
7300         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7301                           ctx->vnic_max_ring_table_entries;
7302         if (ctx->vnic_entry_size) {
7303                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7304                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7305                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7306                 if (rc)
7307                         return rc;
7308         }
7309
7310         ctx_pg = &ctx->stat_mem;
7311         ctx_pg->entries = ctx->stat_max_entries;
7312         if (ctx->stat_entry_size) {
7313                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7314                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7315                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7316                 if (rc)
7317                         return rc;
7318         }
7319
7320         ena = 0;
7321         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7322                 goto skip_rdma;
7323
7324         ctx_pg = &ctx->mrav_mem;
7325         /* 128K extra is needed to accommodate static AH context
7326          * allocation by f/w.
7327          */
7328         num_mr = 1024 * 256;
7329         num_ah = 1024 * 128;
7330         ctx_pg->entries = num_mr + num_ah;
7331         if (ctx->mrav_entry_size) {
7332                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7333                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7334                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7335                 if (rc)
7336                         return rc;
7337         }
7338         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7339         if (ctx->mrav_num_entries_units)
7340                 ctx_pg->entries =
7341                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7342                          (num_ah / ctx->mrav_num_entries_units);
7343
7344         ctx_pg = &ctx->tim_mem;
7345         ctx_pg->entries = ctx->qp_mem.entries;
7346         if (ctx->tim_entry_size) {
7347                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7348                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7349                 if (rc)
7350                         return rc;
7351         }
7352         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7353
7354 skip_rdma:
7355         min = ctx->tqm_min_entries_per_ring;
7356         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7357                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7358         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7359         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7360         entries = roundup(entries, ctx->tqm_entries_multiple);
7361         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7362         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7363                 ctx_pg = ctx->tqm_mem[i];
7364                 ctx_pg->entries = i ? entries : entries_sp;
7365                 if (ctx->tqm_entry_size) {
7366                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7367                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7368                                                     NULL);
7369                         if (rc)
7370                                 return rc;
7371                 }
7372                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7373         }
7374         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7375         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7376         if (rc) {
7377                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7378                            rc);
7379                 return rc;
7380         }
7381         ctx->flags |= BNXT_CTX_FLAG_INITED;
7382         return 0;
7383 }
7384
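/* Query the min/max resource counts (completion/TX/RX rings, ring groups,
 * L2 contexts, VNICs, statistics contexts) for this function.  With @all
 * false only the TX scheduler input limit is refreshed.
 */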
7385 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7386 {
7387         struct hwrm_func_resource_qcaps_output *resp;
7388         struct hwrm_func_resource_qcaps_input *req;
7389         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7390         int rc;
7391
7392         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7393         if (rc)
7394                 return rc;
7395
7396         req->fid = cpu_to_le16(0xffff);
7397         resp = hwrm_req_hold(bp, req);
7398         rc = hwrm_req_send_silent(bp, req);
7399         if (rc)
7400                 goto hwrm_func_resc_qcaps_exit;
7401
7402         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7403         if (!all)
7404                 goto hwrm_func_resc_qcaps_exit;
7405
7406         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7407         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7408         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7409         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7410         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7411         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7412         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7413         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7414         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7415         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7416         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7417         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7418         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7419         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7420         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7421         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7422
7423         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7424                 u16 max_msix = le16_to_cpu(resp->max_msix);
7425
7426                 hw_resc->max_nqs = max_msix;
7427                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7428         }
7429
7430         if (BNXT_PF(bp)) {
7431                 struct bnxt_pf_info *pf = &bp->pf;
7432
7433                 pf->vf_resv_strategy =
7434                         le16_to_cpu(resp->vf_reservation_strategy);
7435                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7436                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7437         }
7438 hwrm_func_resc_qcaps_exit:
7439         hwrm_req_drop(bp, req);
7440         return rc;
7441 }
7442
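/* Query the port's PTP configuration and set up bp->ptp_cfg, including the
 * PHC reference-clock register locations.  Any failure tears down and
 * clears the PTP state.
 */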
7443 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7444 {
7445         struct hwrm_port_mac_ptp_qcfg_output *resp;
7446         struct hwrm_port_mac_ptp_qcfg_input *req;
7447         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7448         bool phc_cfg;
7449         u8 flags;
7450         int rc;
7451
7452         if (bp->hwrm_spec_code < 0x10801) {
7453                 rc = -ENODEV;
7454                 goto no_ptp;
7455         }
7456
7457         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7458         if (rc)
7459                 goto no_ptp;
7460
7461         req->port_id = cpu_to_le16(bp->pf.port_id);
7462         resp = hwrm_req_hold(bp, req);
7463         rc = hwrm_req_send(bp, req);
7464         if (rc)
7465                 goto exit;
7466
7467         flags = resp->flags;
7468         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7469                 rc = -ENODEV;
7470                 goto exit;
7471         }
7472         if (!ptp) {
7473                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7474                 if (!ptp) {
7475                         rc = -ENOMEM;
7476                         goto exit;
7477                 }
7478                 ptp->bp = bp;
7479                 bp->ptp_cfg = ptp;
7480         }
7481         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7482                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7483                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7484         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7485                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7486                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7487         } else {
7488                 rc = -ENODEV;
7489                 goto exit;
7490         }
7491         phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7492         rc = bnxt_ptp_init(bp, phc_cfg);
7493         if (rc)
7494                 netdev_warn(bp->dev, "PTP initialization failed.\n");
7495 exit:
7496         hwrm_req_drop(bp, req);
7497         if (!rc)
7498                 return 0;
7499
7500 no_ptp:
7501         bnxt_ptp_clear(bp);
7502         kfree(ptp);
7503         bp->ptp_cfg = NULL;
7504         return rc;
7505 }
7506
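/* Query basic function capabilities: RoCE, PTP, WoL, error-recovery and
 * related capability flags, the TX push threshold, per-function resource
 * maxima, and the PF or VF identifiers and MAC address.
 */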
7507 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7508 {
7509         struct hwrm_func_qcaps_output *resp;
7510         struct hwrm_func_qcaps_input *req;
7511         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7512         u32 flags, flags_ext;
7513         int rc;
7514
7515         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7516         if (rc)
7517                 return rc;
7518
7519         req->fid = cpu_to_le16(0xffff);
7520         resp = hwrm_req_hold(bp, req);
7521         rc = hwrm_req_send(bp, req);
7522         if (rc)
7523                 goto hwrm_func_qcaps_exit;
7524
7525         flags = le32_to_cpu(resp->flags);
7526         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7527                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7528         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7529                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7530         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7531                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7532         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7533                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7534         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7535                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7536         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7537                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7538         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7539                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7540         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7541                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7542         if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7543                 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7544
7545         flags_ext = le32_to_cpu(resp->flags_ext);
7546         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7547                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7548         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7549                 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7550         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7551                 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7552         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7553                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7554         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7555                 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7556
7557         bp->tx_push_thresh = 0;
7558         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7559             BNXT_FW_MAJ(bp) > 217)
7560                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7561
7562         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7563         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7564         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7565         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7566         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7567         if (!hw_resc->max_hw_ring_grps)
7568                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7569         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7570         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7571         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7572
7573         if (BNXT_PF(bp)) {
7574                 struct bnxt_pf_info *pf = &bp->pf;
7575
7576                 pf->fw_fid = le16_to_cpu(resp->fid);
7577                 pf->port_id = le16_to_cpu(resp->port_id);
7578                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7579                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7580                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7581                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7582                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7583                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7584                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7585                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7586                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7587                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7588                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7589                         bp->flags |= BNXT_FLAG_WOL_CAP;
7590                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7591                         __bnxt_hwrm_ptp_qcfg(bp);
7592                 } else {
7593                         bnxt_ptp_clear(bp);
7594                         kfree(bp->ptp_cfg);
7595                         bp->ptp_cfg = NULL;
7596                 }
7597         } else {
7598 #ifdef CONFIG_BNXT_SRIOV
7599                 struct bnxt_vf_info *vf = &bp->vf;
7600
7601                 vf->fw_fid = le16_to_cpu(resp->fid);
7602                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7603 #endif
7604         }
7605
7606 hwrm_func_qcaps_exit:
7607         hwrm_req_drop(bp, req);
7608         return rc;
7609 }
7610
7611 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7612 {
7613         struct hwrm_dbg_qcaps_output *resp;
7614         struct hwrm_dbg_qcaps_input *req;
7615         int rc;
7616
7617         bp->fw_dbg_cap = 0;
7618         if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7619                 return;
7620
7621         rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7622         if (rc)
7623                 return;
7624
7625         req->fid = cpu_to_le16(0xffff);
7626         resp = hwrm_req_hold(bp, req);
7627         rc = hwrm_req_send(bp, req);
7628         if (rc)
7629                 goto hwrm_dbg_qcaps_exit;
7630
7631         bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7632
7633 hwrm_dbg_qcaps_exit:
7634         hwrm_req_drop(bp, req);
7635 }
7636
7637 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7638
7639 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7640 {
7641         int rc;
7642
7643         rc = __bnxt_hwrm_func_qcaps(bp);
7644         if (rc)
7645                 return rc;
7646
7647         bnxt_hwrm_dbg_qcaps(bp);
7648
7649         rc = bnxt_hwrm_queue_qportcfg(bp);
7650         if (rc) {
7651                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7652                 return rc;
7653         }
7654         if (bp->hwrm_spec_code >= 0x10803) {
7655                 rc = bnxt_alloc_ctx_mem(bp);
7656                 if (rc)
7657                         return rc;
7658                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7659                 if (!rc)
7660                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7661         }
7662         return 0;
7663 }
7664
7665 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7666 {
7667         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7668         struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7669         u32 flags;
7670         int rc;
7671
7672         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7673                 return 0;
7674
7675         rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7676         if (rc)
7677                 return rc;
7678
7679         resp = hwrm_req_hold(bp, req);
7680         rc = hwrm_req_send(bp, req);
7681         if (rc)
7682                 goto hwrm_cfa_adv_qcaps_exit;
7683
7684         flags = le32_to_cpu(resp->flags);
7685         if (flags &
7686             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7687                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7688
7689 hwrm_cfa_adv_qcaps_exit:
7690         hwrm_req_drop(bp, req);
7691         return rc;
7692 }
7693
7694 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7695 {
7696         if (bp->fw_health)
7697                 return 0;
7698
7699         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7700         if (!bp->fw_health)
7701                 return -ENOMEM;
7702
7703         mutex_init(&bp->fw_health->lock);
7704         return 0;
7705 }
7706
7707 static int bnxt_alloc_fw_health(struct bnxt *bp)
7708 {
7709         int rc;
7710
7711         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7712             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7713                 return 0;
7714
7715         rc = __bnxt_alloc_fw_health(bp);
7716         if (rc) {
7717                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7718                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7719                 return rc;
7720         }
7721
7722         return 0;
7723 }
7724
7725 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7726 {
7727         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7728                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7729                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7730 }
7731
7732 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7733 {
7734         struct bnxt_fw_health *fw_health = bp->fw_health;
7735         u32 reg_type;
7736
7737         if (!fw_health)
7738                 return;
7739
7740         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7741         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7742                 fw_health->status_reliable = false;
7743
7744         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7745         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7746                 fw_health->resets_reliable = false;
7747 }
7748
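/* Locate the firmware health status register, either from the hcomm status
 * structure advertised by newer firmware or from the fixed P5 location,
 * and map GRC-type locations through the health register window.
 */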
7749 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7750 {
7751         void __iomem *hs;
7752         u32 status_loc;
7753         u32 reg_type;
7754         u32 sig;
7755
7756         if (bp->fw_health)
7757                 bp->fw_health->status_reliable = false;
7758
7759         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7760         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7761
7762         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7763         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7764                 if (!bp->chip_num) {
7765                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7766                         bp->chip_num = readl(bp->bar0 +
7767                                              BNXT_FW_HEALTH_WIN_BASE +
7768                                              BNXT_GRC_REG_CHIP_NUM);
7769                 }
7770                 if (!BNXT_CHIP_P5(bp))
7771                         return;
7772
7773                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7774                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7775         } else {
7776                 status_loc = readl(hs + offsetof(struct hcomm_status,
7777                                                  fw_status_loc));
7778         }
7779
7780         if (__bnxt_alloc_fw_health(bp)) {
7781                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7782                 return;
7783         }
7784
7785         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7786         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7787         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7788                 __bnxt_map_fw_health_reg(bp, status_loc);
7789                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7790                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7791         }
7792
7793         bp->fw_health->status_reliable = true;
7794 }
7795
7796 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7797 {
7798         struct bnxt_fw_health *fw_health = bp->fw_health;
7799         u32 reg_base = 0xffffffff;
7800         int i;
7801
7802         bp->fw_health->status_reliable = false;
7803         bp->fw_health->resets_reliable = false;
7804         /* Only pre-map the monitoring GRC registers using window 3 */
7805         for (i = 0; i < 4; i++) {
7806                 u32 reg = fw_health->regs[i];
7807
7808                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7809                         continue;
7810                 if (reg_base == 0xffffffff)
7811                         reg_base = reg & BNXT_GRC_BASE_MASK;
7812                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7813                         return -ERANGE;
7814                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7815         }
7816         bp->fw_health->status_reliable = true;
7817         bp->fw_health->resets_reliable = true;
7818         if (reg_base == 0xffffffff)
7819                 return 0;
7820
7821         __bnxt_map_fw_health_reg(bp, reg_base);
7822         return 0;
7823 }
7824
7825 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7826 {
7827         if (!bp->fw_health)
7828                 return;
7829
7830         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7831                 bp->fw_health->status_reliable = true;
7832                 bp->fw_health->resets_reliable = true;
7833         } else {
7834                 bnxt_try_map_fw_health_reg(bp);
7835         }
7836 }
7837
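/* Fetch the firmware error-recovery parameters: polling interval, wait
 * periods, the health/heartbeat/reset-count register locations and the
 * reset register sequence.  On failure the ERROR_RECOVERY capability is
 * cleared.
 */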
7838 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7839 {
7840         struct bnxt_fw_health *fw_health = bp->fw_health;
7841         struct hwrm_error_recovery_qcfg_output *resp;
7842         struct hwrm_error_recovery_qcfg_input *req;
7843         int rc, i;
7844
7845         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7846                 return 0;
7847
7848         rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7849         if (rc)
7850                 return rc;
7851
7852         resp = hwrm_req_hold(bp, req);
7853         rc = hwrm_req_send(bp, req);
7854         if (rc)
7855                 goto err_recovery_out;
7856         fw_health->flags = le32_to_cpu(resp->flags);
7857         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7858             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7859                 rc = -EINVAL;
7860                 goto err_recovery_out;
7861         }
7862         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7863         fw_health->master_func_wait_dsecs =
7864                 le32_to_cpu(resp->master_func_wait_period);
7865         fw_health->normal_func_wait_dsecs =
7866                 le32_to_cpu(resp->normal_func_wait_period);
7867         fw_health->post_reset_wait_dsecs =
7868                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7869         fw_health->post_reset_max_wait_dsecs =
7870                 le32_to_cpu(resp->max_bailout_time_after_reset);
7871         fw_health->regs[BNXT_FW_HEALTH_REG] =
7872                 le32_to_cpu(resp->fw_health_status_reg);
7873         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7874                 le32_to_cpu(resp->fw_heartbeat_reg);
7875         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7876                 le32_to_cpu(resp->fw_reset_cnt_reg);
7877         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7878                 le32_to_cpu(resp->reset_inprogress_reg);
7879         fw_health->fw_reset_inprog_reg_mask =
7880                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7881         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7882         if (fw_health->fw_reset_seq_cnt >= 16) {
7883                 rc = -EINVAL;
7884                 goto err_recovery_out;
7885         }
7886         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7887                 fw_health->fw_reset_seq_regs[i] =
7888                         le32_to_cpu(resp->reset_reg[i]);
7889                 fw_health->fw_reset_seq_vals[i] =
7890                         le32_to_cpu(resp->reset_reg_val[i]);
7891                 fw_health->fw_reset_seq_delay_msec[i] =
7892                         resp->delay_after_reset[i];
7893         }
7894 err_recovery_out:
7895         hwrm_req_drop(bp, req);
7896         if (!rc)
7897                 rc = bnxt_map_fw_health_regs(bp);
7898         if (rc)
7899                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7900         return rc;
7901 }
7902
7903 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7904 {
7905         struct hwrm_func_reset_input *req;
7906         int rc;
7907
7908         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7909         if (rc)
7910                 return rc;
7911
7912         req->enables = 0;
7913         hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7914         return hwrm_req_send(bp, req);
7915 }
7916
7917 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7918 {
7919         struct hwrm_nvm_get_dev_info_output nvm_info;
7920
7921         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7922                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7923                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7924                          nvm_info.nvm_cfg_ver_upd);
7925 }
7926
7927 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7928 {
7929         struct hwrm_queue_qportcfg_output *resp;
7930         struct hwrm_queue_qportcfg_input *req;
7931         u8 i, j, *qptr;
7932         bool no_rdma;
7933         int rc = 0;
7934
7935         rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7936         if (rc)
7937                 return rc;
7938
7939         resp = hwrm_req_hold(bp, req);
7940         rc = hwrm_req_send(bp, req);
7941         if (rc)
7942                 goto qportcfg_exit;
7943
7944         if (!resp->max_configurable_queues) {
7945                 rc = -EINVAL;
7946                 goto qportcfg_exit;
7947         }
7948         bp->max_tc = resp->max_configurable_queues;
7949         bp->max_lltc = resp->max_configurable_lossless_queues;
7950         if (bp->max_tc > BNXT_MAX_QUEUE)
7951                 bp->max_tc = BNXT_MAX_QUEUE;
7952
7953         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7954         qptr = &resp->queue_id0;
7955         for (i = 0, j = 0; i < bp->max_tc; i++) {
7956                 bp->q_info[j].queue_id = *qptr;
7957                 bp->q_ids[i] = *qptr++;
7958                 bp->q_info[j].queue_profile = *qptr++;
7959                 bp->tc_to_qidx[j] = j;
7960                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7961                     (no_rdma && BNXT_PF(bp)))
7962                         j++;
7963         }
7964         bp->max_q = bp->max_tc;
7965         bp->max_tc = max_t(u8, j, 1);
7966
7967         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7968                 bp->max_tc = 1;
7969
7970         if (bp->max_lltc > bp->max_tc)
7971                 bp->max_lltc = bp->max_tc;
7972
7973 qportcfg_exit:
7974         hwrm_req_drop(bp, req);
7975         return rc;
7976 }
7977
7978 static int bnxt_hwrm_poll(struct bnxt *bp)
7979 {
7980         struct hwrm_ver_get_input *req;
7981         int rc;
7982
7983         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7984         if (rc)
7985                 return rc;
7986
7987         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7988         req->hwrm_intf_min = HWRM_VERSION_MINOR;
7989         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7990
7991         hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7992         rc = hwrm_req_send(bp, req);
7993         return rc;
7994 }
7995
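/* Negotiate the HWRM interface version with firmware and cache the
 * firmware version strings, command timeouts, maximum request lengths,
 * chip identification and device capability flags.
 */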
7996 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7997 {
7998         struct hwrm_ver_get_output *resp;
7999         struct hwrm_ver_get_input *req;
8000         u16 fw_maj, fw_min, fw_bld, fw_rsv;
8001         u32 dev_caps_cfg, hwrm_ver;
8002         int rc, len;
8003
8004         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8005         if (rc)
8006                 return rc;
8007
8008         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8009         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8010         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8011         req->hwrm_intf_min = HWRM_VERSION_MINOR;
8012         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8013
8014         resp = hwrm_req_hold(bp, req);
8015         rc = hwrm_req_send(bp, req);
8016         if (rc)
8017                 goto hwrm_ver_get_exit;
8018
8019         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8020
8021         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8022                              resp->hwrm_intf_min_8b << 8 |
8023                              resp->hwrm_intf_upd_8b;
8024         if (resp->hwrm_intf_maj_8b < 1) {
8025                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8026                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8027                             resp->hwrm_intf_upd_8b);
8028                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8029         }
8030
8031         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8032                         HWRM_VERSION_UPDATE;
8033
8034         if (bp->hwrm_spec_code > hwrm_ver)
8035                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8036                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8037                          HWRM_VERSION_UPDATE);
8038         else
8039                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8040                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8041                          resp->hwrm_intf_upd_8b);
8042
8043         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8044         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8045                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8046                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8047                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8048                 len = FW_VER_STR_LEN;
8049         } else {
8050                 fw_maj = resp->hwrm_fw_maj_8b;
8051                 fw_min = resp->hwrm_fw_min_8b;
8052                 fw_bld = resp->hwrm_fw_bld_8b;
8053                 fw_rsv = resp->hwrm_fw_rsvd_8b;
8054                 len = BC_HWRM_STR_LEN;
8055         }
8056         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8057         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8058                  fw_rsv);
8059
8060         if (strlen(resp->active_pkg_name)) {
8061                 int fw_ver_len = strlen(bp->fw_ver_str);
8062
8063                 snprintf(bp->fw_ver_str + fw_ver_len,
8064                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8065                          resp->active_pkg_name);
8066                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8067         }
8068
8069         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8070         if (!bp->hwrm_cmd_timeout)
8071                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8072         bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8073         if (!bp->hwrm_cmd_max_timeout)
8074                 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8075         else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8076                 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8077                             bp->hwrm_cmd_max_timeout / 1000);
8078
8079         if (resp->hwrm_intf_maj_8b >= 1) {
8080                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8081                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8082         }
8083         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8084                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8085
8086         bp->chip_num = le16_to_cpu(resp->chip_num);
8087         bp->chip_rev = resp->chip_rev;
8088         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8089             !resp->chip_metal)
8090                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8091
8092         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8093         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8094             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8095                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8096
8097         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8098                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8099
8100         if (dev_caps_cfg &
8101             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8102                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8103
8104         if (dev_caps_cfg &
8105             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8106                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8107
8108         if (dev_caps_cfg &
8109             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8110                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8111
8112 hwrm_ver_get_exit:
8113         hwrm_req_drop(bp, req);
8114         return rc;
8115 }
8116
8117 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8118 {
8119         struct hwrm_fw_set_time_input *req;
8120         struct tm tm;
8121         time64_t now = ktime_get_real_seconds();
8122         int rc;
8123
8124         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8125             bp->hwrm_spec_code < 0x10400)
8126                 return -EOPNOTSUPP;
8127
8128         time64_to_tm(now, 0, &tm);
8129         rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8130         if (rc)
8131                 return rc;
8132
8133         req->year = cpu_to_le16(1900 + tm.tm_year);
8134         req->month = 1 + tm.tm_mon;
8135         req->day = tm.tm_mday;
8136         req->hour = tm.tm_hour;
8137         req->minute = tm.tm_min;
8138         req->second = tm.tm_sec;
8139         return hwrm_req_send(bp, req);
8140 }
8141
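/* Fold a hardware counter narrower than 64 bits into a monotonic 64-bit
 * software counter.  @mask covers the hardware counter width; if the new
 * value is below the last one seen, the counter wrapped and mask + 1 is
 * added.  For example, with a 48-bit mask a step from 0xfffffffffffe to
 * 0x1 adds 2^48, so the software counter still advances by 3.
 */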
8142 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8143 {
8144         u64 sw_tmp;
8145
8146         hw &= mask;
8147         sw_tmp = (*sw & ~mask) | hw;
8148         if (hw < (*sw & mask))
8149                 sw_tmp += mask + 1;
8150         WRITE_ONCE(*sw, sw_tmp);
8151 }
8152
8153 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8154                                     int count, bool ignore_zero)
8155 {
8156         int i;
8157
8158         for (i = 0; i < count; i++) {
8159                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8160
8161                 if (ignore_zero && !hw)
8162                         continue;
8163
8164                 if (masks[i] == -1ULL)
8165                         sw_stats[i] = hw;
8166                 else
8167                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8168         }
8169 }
8170
8171 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8172 {
8173         if (!stats->hw_stats)
8174                 return;
8175
8176         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8177                                 stats->hw_masks, stats->len / 8, false);
8178 }
8179
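/* Accumulate all hardware statistics blocks into their 64-bit software
 * counterparts.  Every completion ring uses the same counter layout, so the
 * mask table and length of ring 0 are reused for all rings.
 */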
8180 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8181 {
8182         struct bnxt_stats_mem *ring0_stats;
8183         bool ignore_zero = false;
8184         int i;
8185
8186         /* Chip bug.  Counter intermittently becomes 0. */
8187         if (bp->flags & BNXT_FLAG_CHIP_P5)
8188                 ignore_zero = true;
8189
8190         for (i = 0; i < bp->cp_nr_rings; i++) {
8191                 struct bnxt_napi *bnapi = bp->bnapi[i];
8192                 struct bnxt_cp_ring_info *cpr;
8193                 struct bnxt_stats_mem *stats;
8194
8195                 cpr = &bnapi->cp_ring;
8196                 stats = &cpr->stats;
8197                 if (!i)
8198                         ring0_stats = stats;
8199                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8200                                         ring0_stats->hw_masks,
8201                                         ring0_stats->len / 8, ignore_zero);
8202         }
8203         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8204                 struct bnxt_stats_mem *stats = &bp->port_stats;
8205                 __le64 *hw_stats = stats->hw_stats;
8206                 u64 *sw_stats = stats->sw_stats;
8207                 u64 *masks = stats->hw_masks;
8208                 int cnt;
8209
8210                 cnt = sizeof(struct rx_port_stats) / 8;
8211                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8212
8213                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8214                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8215                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8216                 cnt = sizeof(struct tx_port_stats) / 8;
8217                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8218         }
8219         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8220                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8221                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8222         }
8223 }
8224
8225 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8226 {
8227         struct hwrm_port_qstats_input *req;
8228         struct bnxt_pf_info *pf = &bp->pf;
8229         int rc;
8230
8231         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8232                 return 0;
8233
8234         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8235                 return -EOPNOTSUPP;
8236
8237         rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8238         if (rc)
8239                 return rc;
8240
8241         req->flags = flags;
8242         req->port_id = cpu_to_le16(pf->port_id);
8243         req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8244                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8245         req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8246         return hwrm_req_send(bp, req);
8247 }
8248
8249 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8250 {
8251         struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8252         struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8253         struct hwrm_port_qstats_ext_output *resp_qs;
8254         struct hwrm_port_qstats_ext_input *req_qs;
8255         struct bnxt_pf_info *pf = &bp->pf;
8256         u32 tx_stat_size;
8257         int rc;
8258
8259         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8260                 return 0;
8261
8262         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8263                 return -EOPNOTSUPP;
8264
8265         rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8266         if (rc)
8267                 return rc;
8268
8269         req_qs->flags = flags;
8270         req_qs->port_id = cpu_to_le16(pf->port_id);
8271         req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8272         req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8273         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8274                        sizeof(struct tx_port_stats_ext) : 0;
8275         req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8276         req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8277         resp_qs = hwrm_req_hold(bp, req_qs);
8278         rc = hwrm_req_send(bp, req_qs);
8279         if (!rc) {
8280                 bp->fw_rx_stats_ext_size =
8281                         le16_to_cpu(resp_qs->rx_stat_size) / 8;
8282                 if (BNXT_FW_MAJ(bp) < 220 &&
8283                     bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8284                         bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8285
8286                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8287                         le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8288         } else {
8289                 bp->fw_rx_stats_ext_size = 0;
8290                 bp->fw_tx_stats_ext_size = 0;
8291         }
8292         hwrm_req_drop(bp, req_qs);
8293
8294         if (flags)
8295                 return rc;
8296
8297         if (bp->fw_tx_stats_ext_size <=
8298             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8299                 bp->pri2cos_valid = 0;
8300                 return rc;
8301         }
8302
8303         rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8304         if (rc)
8305                 return rc;
8306
8307         req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8308
8309         resp_qc = hwrm_req_hold(bp, req_qc);
8310         rc = hwrm_req_send(bp, req_qc);
8311         if (!rc) {
8312                 u8 *pri2cos;
8313                 int i, j;
8314
8315                 pri2cos = &resp_qc->pri0_cos_queue_id;
8316                 for (i = 0; i < 8; i++) {
8317                         u8 queue_id = pri2cos[i];
8318                         u8 queue_idx;
8319
8320                         /* Per-port queue IDs start from 0, 10, 20, etc. */
8321                         queue_idx = queue_id % 10;
8322                         if (queue_idx > BNXT_MAX_QUEUE) {
8323                                 bp->pri2cos_valid = false;
8324                                 hwrm_req_drop(bp, req_qc);
8325                                 return rc;
8326                         }
8327                         for (j = 0; j < bp->max_q; j++) {
8328                                 if (bp->q_ids[j] == queue_id)
8329                                         bp->pri2cos_idx[i] = queue_idx;
8330                         }
8331                 }
8332                 bp->pri2cos_valid = true;
8333         }
8334         hwrm_req_drop(bp, req_qc);
8335
8336         return rc;
8337 }
8338
8339 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8340 {
8341         bnxt_hwrm_tunnel_dst_port_free(bp,
8342                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8343         bnxt_hwrm_tunnel_dst_port_free(bp,
8344                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8345 }
8346
8347 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8348 {
8349         int rc, i;
8350         u32 tpa_flags = 0;
8351
8352         if (set_tpa)
8353                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8354         else if (BNXT_NO_FW_ACCESS(bp))
8355                 return 0;
8356         for (i = 0; i < bp->nr_vnics; i++) {
8357                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8358                 if (rc) {
8359                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
8360                                    i, rc);
8361                         return rc;
8362                 }
8363         }
8364         return 0;
8365 }
8366
8367 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8368 {
8369         int i;
8370
8371         for (i = 0; i < bp->nr_vnics; i++)
8372                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8373 }
8374
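/* Tear down all VNIC state.  L2 filters are cleared first; on pre-P5 chips
 * the RSS settings and contexts must be released before the VNICs are
 * freed, while P5 chips free the contexts afterwards.  TPA settings are
 * undone before the VNICs are freed.
 */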
8375 static void bnxt_clear_vnic(struct bnxt *bp)
8376 {
8377         if (!bp->vnic_info)
8378                 return;
8379
8380         bnxt_hwrm_clear_vnic_filter(bp);
8381         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8382                 /* clear all RSS settings before freeing the vnic ctx */
8383                 bnxt_hwrm_clear_vnic_rss(bp);
8384                 bnxt_hwrm_vnic_ctx_free(bp);
8385         }
8386         /* before freeing the vnic, undo the vnic TPA settings */
8387         if (bp->flags & BNXT_FLAG_TPA)
8388                 bnxt_set_tpa(bp, false);
8389         bnxt_hwrm_vnic_free(bp);
8390         if (bp->flags & BNXT_FLAG_CHIP_P5)
8391                 bnxt_hwrm_vnic_ctx_free(bp);
8392 }
8393
8394 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8395                                     bool irq_re_init)
8396 {
8397         bnxt_clear_vnic(bp);
8398         bnxt_hwrm_ring_free(bp, close_path);
8399         bnxt_hwrm_ring_grp_free(bp);
8400         if (irq_re_init) {
8401                 bnxt_hwrm_stat_ctx_free(bp);
8402                 bnxt_hwrm_free_tunnel_ports(bp);
8403         }
8404 }
8405
8406 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8407 {
8408         struct hwrm_func_cfg_input *req;
8409         u8 evb_mode;
8410         int rc;
8411
8412         if (br_mode == BRIDGE_MODE_VEB)
8413                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8414         else if (br_mode == BRIDGE_MODE_VEPA)
8415                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8416         else
8417                 return -EINVAL;
8418
8419         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8420         if (rc)
8421                 return rc;
8422
8423         req->fid = cpu_to_le16(0xffff);
8424         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8425         req->evb_mode = evb_mode;
8426         return hwrm_req_send(bp, req);
8427 }
8428
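/* Report the host cache line size (64 or 128 bytes) to the firmware via
 * HWRM_FUNC_CFG.  Skipped on VFs and on firmware older than HWRM spec
 * 0x10803.
 */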
8429 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8430 {
8431         struct hwrm_func_cfg_input *req;
8432         int rc;
8433
8434         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8435                 return 0;
8436
8437         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8438         if (rc)
8439                 return rc;
8440
8441         req->fid = cpu_to_le16(0xffff);
8442         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8443         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8444         if (size == 128)
8445                 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8446
8447         return hwrm_req_send(bp, req);
8448 }
8449
8450 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8451 {
8452         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8453         int rc;
8454
8455         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8456                 goto skip_rss_ctx;
8457
8458         /* allocate context for vnic */
8459         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8460         if (rc) {
8461                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8462                            vnic_id, rc);
8463                 goto vnic_setup_err;
8464         }
8465         bp->rsscos_nr_ctxs++;
8466
8467         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8468                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8469                 if (rc) {
8470                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8471                                    vnic_id, rc);
8472                         goto vnic_setup_err;
8473                 }
8474                 bp->rsscos_nr_ctxs++;
8475         }
8476
8477 skip_rss_ctx:
8478         /* configure default vnic, ring grp */
8479         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8480         if (rc) {
8481                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8482                            vnic_id, rc);
8483                 goto vnic_setup_err;
8484         }
8485
8486         /* Enable RSS hashing on vnic */
8487         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8488         if (rc) {
8489                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8490                            vnic_id, rc);
8491                 goto vnic_setup_err;
8492         }
8493
8494         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8495                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8496                 if (rc) {
8497                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8498                                    vnic_id, rc);
8499                 }
8500         }
8501
8502 vnic_setup_err:
8503         return rc;
8504 }
8505
8506 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8507 {
8508         int rc, i, nr_ctxs;
8509
8510         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8511         for (i = 0; i < nr_ctxs; i++) {
8512                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8513                 if (rc) {
8514                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8515                                    vnic_id, i, rc);
8516                         break;
8517                 }
8518                 bp->rsscos_nr_ctxs++;
8519         }
8520         if (i < nr_ctxs)
8521                 return -ENOMEM;
8522
8523         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8524         if (rc) {
8525                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8526                            vnic_id, rc);
8527                 return rc;
8528         }
8529         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8530         if (rc) {
8531                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8532                            vnic_id, rc);
8533                 return rc;
8534         }
8535         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8536                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8537                 if (rc) {
8538                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8539                                    vnic_id, rc);
8540                 }
8541         }
8542         return rc;
8543 }
8544
8545 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8546 {
8547         if (bp->flags & BNXT_FLAG_CHIP_P5)
8548                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8549         else
8550                 return __bnxt_setup_vnic(bp, vnic_id);
8551 }
8552
8553 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8554 {
8555 #ifdef CONFIG_RFS_ACCEL
8556         int i, rc = 0;
8557
8558         if (bp->flags & BNXT_FLAG_CHIP_P5)
8559                 return 0;
8560
8561         for (i = 0; i < bp->rx_nr_rings; i++) {
8562                 struct bnxt_vnic_info *vnic;
8563                 u16 vnic_id = i + 1;
8564                 u16 ring_id = i;
8565
8566                 if (vnic_id >= bp->nr_vnics)
8567                         break;
8568
8569                 vnic = &bp->vnic_info[vnic_id];
8570                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8571                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8572                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8573                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8574                 if (rc) {
8575                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8576                                    vnic_id, rc);
8577                         break;
8578                 }
8579                 rc = bnxt_setup_vnic(bp, vnic_id);
8580                 if (rc)
8581                         break;
8582         }
8583         return rc;
8584 #else
8585         return 0;
8586 #endif
8587 }
8588
8589 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8590 static bool bnxt_promisc_ok(struct bnxt *bp)
8591 {
8592 #ifdef CONFIG_BNXT_SRIOV
8593         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8594                 return false;
8595 #endif
8596         return true;
8597 }
8598
8599 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8600 {
8601         int rc;
8602
8603         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8604         if (rc) {
8605                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8606                            rc);
8607                 return rc;
8608         }
8609
8610         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8611         if (rc) {
8612                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8613                            rc);
8614                 return rc;
8615         }
8616         return rc;
8617 }
8618
8619 static int bnxt_cfg_rx_mode(struct bnxt *);
8620 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8621
8622 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8623 {
8624         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8625         int rc = 0;
8626         unsigned int rx_nr_rings = bp->rx_nr_rings;
8627
8628         if (irq_re_init) {
8629                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8630                 if (rc) {
8631                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8632                                    rc);
8633                         goto err_out;
8634                 }
8635         }
8636
8637         rc = bnxt_hwrm_ring_alloc(bp);
8638         if (rc) {
8639                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8640                 goto err_out;
8641         }
8642
8643         rc = bnxt_hwrm_ring_grp_alloc(bp);
8644         if (rc) {
8645                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8646                 goto err_out;
8647         }
8648
8649         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8650                 rx_nr_rings--;
8651
8652         /* default vnic 0 */
8653         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8654         if (rc) {
8655                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8656                 goto err_out;
8657         }
8658
8659         rc = bnxt_setup_vnic(bp, 0);
8660         if (rc)
8661                 goto err_out;
8662
8663         if (bp->flags & BNXT_FLAG_RFS) {
8664                 rc = bnxt_alloc_rfs_vnics(bp);
8665                 if (rc)
8666                         goto err_out;
8667         }
8668
8669         if (bp->flags & BNXT_FLAG_TPA) {
8670                 rc = bnxt_set_tpa(bp, true);
8671                 if (rc)
8672                         goto err_out;
8673         }
8674
8675         if (BNXT_VF(bp))
8676                 bnxt_update_vf_mac(bp);
8677
8678         /* Filter for default vnic 0 */
8679         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8680         if (rc) {
8681                 if (BNXT_VF(bp) && rc == -ENODEV)
8682                         netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8683                 else
8684                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8685                 goto err_out;
8686         }
8687         vnic->uc_filter_count = 1;
8688
8689         vnic->rx_mask = 0;
8690         if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8691                 goto skip_rx_mask;
8692
8693         if (bp->dev->flags & IFF_BROADCAST)
8694                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8695
8696         if (bp->dev->flags & IFF_PROMISC)
8697                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8698
8699         if (bp->dev->flags & IFF_ALLMULTI) {
8700                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8701                 vnic->mc_list_count = 0;
8702         } else if (bp->dev->flags & IFF_MULTICAST) {
8703                 u32 mask = 0;
8704
8705                 bnxt_mc_list_updated(bp, &mask);
8706                 vnic->rx_mask |= mask;
8707         }
8708
8709         rc = bnxt_cfg_rx_mode(bp);
8710         if (rc)
8711                 goto err_out;
8712
8713 skip_rx_mask:
8714         rc = bnxt_hwrm_set_coal(bp);
8715         if (rc)
8716                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8717                                 rc);
8718
8719         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8720                 rc = bnxt_setup_nitroa0_vnic(bp);
8721                 if (rc)
8722                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8723                                    rc);
8724         }
8725
8726         if (BNXT_VF(bp)) {
8727                 bnxt_hwrm_func_qcfg(bp);
8728                 netdev_update_features(bp->dev);
8729         }
8730
8731         return 0;
8732
8733 err_out:
8734         bnxt_hwrm_resource_free(bp, 0, true);
8735
8736         return rc;
8737 }
8738
8739 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8740 {
8741         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8742         return 0;
8743 }
8744
8745 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8746 {
8747         bnxt_init_cp_rings(bp);
8748         bnxt_init_rx_rings(bp);
8749         bnxt_init_tx_rings(bp);
8750         bnxt_init_ring_grps(bp, irq_re_init);
8751         bnxt_init_vnics(bp);
8752
8753         return bnxt_init_chip(bp, irq_re_init);
8754 }
8755
8756 static int bnxt_set_real_num_queues(struct bnxt *bp)
8757 {
8758         int rc;
8759         struct net_device *dev = bp->dev;
8760
8761         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8762                                           bp->tx_nr_rings_xdp);
8763         if (rc)
8764                 return rc;
8765
8766         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8767         if (rc)
8768                 return rc;
8769
8770 #ifdef CONFIG_RFS_ACCEL
8771         if (bp->flags & BNXT_FLAG_RFS)
8772                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8773 #endif
8774
8775         return rc;
8776 }
8777
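/* Trim the requested RX and TX ring counts to fit within @max.  When
 * completion rings are shared, an RX/TX pair consumes a single ring, so
 * each count is simply clamped to @max.  Otherwise rings are removed from
 * whichever side is larger until rx + tx <= max.
 */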
8778 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8779                            bool shared)
8780 {
8781         int _rx = *rx, _tx = *tx;
8782
8783         if (shared) {
8784                 *rx = min_t(int, _rx, max);
8785                 *tx = min_t(int, _tx, max);
8786         } else {
8787                 if (max < 2)
8788                         return -ENOMEM;
8789
8790                 while (_rx + _tx > max) {
8791                         if (_rx > _tx && _rx > 1)
8792                                 _rx--;
8793                         else if (_tx > 1)
8794                                 _tx--;
8795                 }
8796                 *rx = _rx;
8797                 *tx = _tx;
8798         }
8799         return 0;
8800 }
8801
8802 static void bnxt_setup_msix(struct bnxt *bp)
8803 {
8804         const int len = sizeof(bp->irq_tbl[0].name);
8805         struct net_device *dev = bp->dev;
8806         int tcs, i;
8807
8808         tcs = netdev_get_num_tc(dev);
8809         if (tcs) {
8810                 int i, off, count;
8811
8812                 for (i = 0; i < tcs; i++) {
8813                         count = bp->tx_nr_rings_per_tc;
8814                         off = i * count;
8815                         netdev_set_tc_queue(dev, i, count, off);
8816                 }
8817         }
8818
8819         for (i = 0; i < bp->cp_nr_rings; i++) {
8820                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8821                 char *attr;
8822
8823                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8824                         attr = "TxRx";
8825                 else if (i < bp->rx_nr_rings)
8826                         attr = "rx";
8827                 else
8828                         attr = "tx";
8829
8830                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8831                          attr, i);
8832                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8833         }
8834 }
8835
8836 static void bnxt_setup_inta(struct bnxt *bp)
8837 {
8838         const int len = sizeof(bp->irq_tbl[0].name);
8839
8840         if (netdev_get_num_tc(bp->dev))
8841                 netdev_reset_tc(bp->dev);
8842
8843         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8844                  0);
8845         bp->irq_tbl[0].handler = bnxt_inta;
8846 }
8847
8848 static int bnxt_init_int_mode(struct bnxt *bp);
8849
8850 static int bnxt_setup_int_mode(struct bnxt *bp)
8851 {
8852         int rc;
8853
8854         if (!bp->irq_tbl) {
8855                 rc = bnxt_init_int_mode(bp);
8856                 if (rc || !bp->irq_tbl)
8857                         return rc ?: -ENODEV;
8858         }
8859
8860         if (bp->flags & BNXT_FLAG_USING_MSIX)
8861                 bnxt_setup_msix(bp);
8862         else
8863                 bnxt_setup_inta(bp);
8864
8865         rc = bnxt_set_real_num_queues(bp);
8866         return rc;
8867 }
8868
8869 #ifdef CONFIG_RFS_ACCEL
8870 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8871 {
8872         return bp->hw_resc.max_rsscos_ctxs;
8873 }
8874
8875 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8876 {
8877         return bp->hw_resc.max_vnics;
8878 }
8879 #endif
8880
8881 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8882 {
8883         return bp->hw_resc.max_stat_ctxs;
8884 }
8885
8886 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8887 {
8888         return bp->hw_resc.max_cp_rings;
8889 }
8890
8891 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8892 {
8893         unsigned int cp = bp->hw_resc.max_cp_rings;
8894
8895         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8896                 cp -= bnxt_get_ulp_msix_num(bp);
8897
8898         return cp;
8899 }
8900
8901 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8902 {
8903         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8904
8905         if (bp->flags & BNXT_FLAG_CHIP_P5)
8906                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8907
8908         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8909 }
8910
8911 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8912 {
8913         bp->hw_resc.max_irqs = max_irqs;
8914 }
8915
8916 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8917 {
8918         unsigned int cp;
8919
8920         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8921         if (bp->flags & BNXT_FLAG_CHIP_P5)
8922                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8923         else
8924                 return cp - bp->cp_nr_rings;
8925 }
8926
8927 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8928 {
8929         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8930 }
8931
8932 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8933 {
8934         int max_cp = bnxt_get_max_func_cp_rings(bp);
8935         int max_irq = bnxt_get_max_func_irqs(bp);
8936         int total_req = bp->cp_nr_rings + num;
8937         int max_idx, avail_msix;
8938
8939         max_idx = bp->total_irqs;
8940         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8941                 max_idx = min_t(int, bp->total_irqs, max_cp);
8942         avail_msix = max_idx - bp->cp_nr_rings;
8943         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8944                 return avail_msix;
8945
8946         if (max_irq < total_req) {
8947                 num = max_irq - bp->cp_nr_rings;
8948                 if (num <= 0)
8949                         return 0;
8950         }
8951         return num;
8952 }
8953
8954 static int bnxt_get_num_msix(struct bnxt *bp)
8955 {
8956         if (!BNXT_NEW_RM(bp))
8957                 return bnxt_get_max_func_irqs(bp);
8958
8959         return bnxt_nq_rings_in_use(bp);
8960 }
8961
8962 static int bnxt_init_msix(struct bnxt *bp)
8963 {
8964         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8965         struct msix_entry *msix_ent;
8966
8967         total_vecs = bnxt_get_num_msix(bp);
8968         max = bnxt_get_max_func_irqs(bp);
8969         if (total_vecs > max)
8970                 total_vecs = max;
8971
8972         if (!total_vecs)
8973                 return 0;
8974
8975         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8976         if (!msix_ent)
8977                 return -ENOMEM;
8978
8979         for (i = 0; i < total_vecs; i++) {
8980                 msix_ent[i].entry = i;
8981                 msix_ent[i].vector = 0;
8982         }
8983
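        /* Without shared completion rings, RX and TX each need their own
         * vector, so require at least two MSI-X vectors.
         */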
8984         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8985                 min = 2;
8986
8987         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8988         ulp_msix = bnxt_get_ulp_msix_num(bp);
8989         if (total_vecs < 0 || total_vecs < ulp_msix) {
8990                 rc = -ENODEV;
8991                 goto msix_setup_exit;
8992         }
8993
8994         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8995         if (bp->irq_tbl) {
8996                 for (i = 0; i < total_vecs; i++)
8997                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8998
8999                 bp->total_irqs = total_vecs;
9000                 /* Trim rings based on the number of vectors allocated */
9001                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9002                                      total_vecs - ulp_msix, min == 1);
9003                 if (rc)
9004                         goto msix_setup_exit;
9005
9006                 bp->cp_nr_rings = (min == 1) ?
9007                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9008                                   bp->tx_nr_rings + bp->rx_nr_rings;
9009
9010         } else {
9011                 rc = -ENOMEM;
9012                 goto msix_setup_exit;
9013         }
9014         bp->flags |= BNXT_FLAG_USING_MSIX;
9015         kfree(msix_ent);
9016         return 0;
9017
9018 msix_setup_exit:
9019         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9020         kfree(bp->irq_tbl);
9021         bp->irq_tbl = NULL;
9022         pci_disable_msix(bp->pdev);
9023         kfree(msix_ent);
9024         return rc;
9025 }
9026
9027 static int bnxt_init_inta(struct bnxt *bp)
9028 {
9029         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9030         if (!bp->irq_tbl)
9031                 return -ENOMEM;
9032
9033         bp->total_irqs = 1;
9034         bp->rx_nr_rings = 1;
9035         bp->tx_nr_rings = 1;
9036         bp->cp_nr_rings = 1;
9037         bp->flags |= BNXT_FLAG_SHARED_RINGS;
9038         bp->irq_tbl[0].vector = bp->pdev->irq;
9039         return 0;
9040 }
9041
9042 static int bnxt_init_int_mode(struct bnxt *bp)
9043 {
9044         int rc = -ENODEV;
9045
9046         if (bp->flags & BNXT_FLAG_MSIX_CAP)
9047                 rc = bnxt_init_msix(bp);
9048
9049         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9050                 /* fallback to INTA */
9051                 rc = bnxt_init_inta(bp);
9052         }
9053         return rc;
9054 }
9055
9056 static void bnxt_clear_int_mode(struct bnxt *bp)
9057 {
9058         if (bp->flags & BNXT_FLAG_USING_MSIX)
9059                 pci_disable_msix(bp->pdev);
9060
9061         kfree(bp->irq_tbl);
9062         bp->irq_tbl = NULL;
9063         bp->flags &= ~BNXT_FLAG_USING_MSIX;
9064 }
9065
9066 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9067 {
9068         int tcs = netdev_get_num_tc(bp->dev);
9069         bool irq_cleared = false;
9070         int rc;
9071
9072         if (!bnxt_need_reserve_rings(bp))
9073                 return 0;
9074
9075         if (irq_re_init && BNXT_NEW_RM(bp) &&
9076             bnxt_get_num_msix(bp) != bp->total_irqs) {
9077                 bnxt_ulp_irq_stop(bp);
9078                 bnxt_clear_int_mode(bp);
9079                 irq_cleared = true;
9080         }
9081         rc = __bnxt_reserve_rings(bp);
9082         if (irq_cleared) {
9083                 if (!rc)
9084                         rc = bnxt_init_int_mode(bp);
9085                 bnxt_ulp_irq_restart(bp, rc);
9086         }
9087         if (rc) {
9088                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9089                 return rc;
9090         }
9091         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9092                 netdev_err(bp->dev, "tx ring reservation failure\n");
9093                 netdev_reset_tc(bp->dev);
9094                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9095                 return -ENOMEM;
9096         }
9097         return 0;
9098 }
9099
9100 static void bnxt_free_irq(struct bnxt *bp)
9101 {
9102         struct bnxt_irq *irq;
9103         int i;
9104
9105 #ifdef CONFIG_RFS_ACCEL
9106         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9107         bp->dev->rx_cpu_rmap = NULL;
9108 #endif
9109         if (!bp->irq_tbl || !bp->bnapi)
9110                 return;
9111
9112         for (i = 0; i < bp->cp_nr_rings; i++) {
9113                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9114
9115                 irq = &bp->irq_tbl[map_idx];
9116                 if (irq->requested) {
9117                         if (irq->have_cpumask) {
9118                                 irq_set_affinity_hint(irq->vector, NULL);
9119                                 free_cpumask_var(irq->cpu_mask);
9120                                 irq->have_cpumask = 0;
9121                         }
9122                         free_irq(irq->vector, bp->bnapi[i]);
9123                 }
9124
9125                 irq->requested = 0;
9126         }
9127 }
9128
9129 static int bnxt_request_irq(struct bnxt *bp)
9130 {
9131         int i, j, rc = 0;
9132         unsigned long flags = 0;
9133 #ifdef CONFIG_RFS_ACCEL
9134         struct cpu_rmap *rmap;
9135 #endif
9136
9137         rc = bnxt_setup_int_mode(bp);
9138         if (rc) {
9139                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9140                            rc);
9141                 return rc;
9142         }
9143 #ifdef CONFIG_RFS_ACCEL
9144         rmap = bp->dev->rx_cpu_rmap;
9145 #endif
9146         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9147                 flags = IRQF_SHARED;
9148
9149         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9150                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9151                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9152
9153 #ifdef CONFIG_RFS_ACCEL
9154                 if (rmap && bp->bnapi[i]->rx_ring) {
9155                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9156                         if (rc)
9157                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9158                                             j);
9159                         j++;
9160                 }
9161 #endif
9162                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9163                                  bp->bnapi[i]);
9164                 if (rc)
9165                         break;
9166
9167                 irq->requested = 1;
9168
9169                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9170                         int numa_node = dev_to_node(&bp->pdev->dev);
9171
9172                         irq->have_cpumask = 1;
9173                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9174                                         irq->cpu_mask);
9175                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9176                         if (rc) {
9177                                 netdev_warn(bp->dev,
9178                                             "Set affinity failed, IRQ = %d\n",
9179                                             irq->vector);
9180                                 break;
9181                         }
9182                 }
9183         }
9184         return rc;
9185 }
9186
9187 static void bnxt_del_napi(struct bnxt *bp)
9188 {
9189         int i;
9190
9191         if (!bp->bnapi)
9192                 return;
9193
9194         for (i = 0; i < bp->cp_nr_rings; i++) {
9195                 struct bnxt_napi *bnapi = bp->bnapi[i];
9196
9197                 __netif_napi_del(&bnapi->napi);
9198         }
9199         /* We called __netif_napi_del(), we need
9200          * to respect an RCU grace period before freeing napi structures.
9201          */
9202         synchronize_net();
9203 }
9204
9205 static void bnxt_init_napi(struct bnxt *bp)
9206 {
9207         int i;
9208         unsigned int cp_nr_rings = bp->cp_nr_rings;
9209         struct bnxt_napi *bnapi;
9210
9211         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9212                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9213
9214                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9215                         poll_fn = bnxt_poll_p5;
9216                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9217                         cp_nr_rings--;
9218                 for (i = 0; i < cp_nr_rings; i++) {
9219                         bnapi = bp->bnapi[i];
9220                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9221                 }
9222                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9223                         bnapi = bp->bnapi[cp_nr_rings];
9224                         netif_napi_add(bp->dev, &bnapi->napi,
9225                                        bnxt_poll_nitroa0, 64);
9226                 }
9227         } else {
9228                 bnapi = bp->bnapi[0];
9229                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9230         }
9231 }
9232
9233 static void bnxt_disable_napi(struct bnxt *bp)
9234 {
9235         int i;
9236
9237         if (!bp->bnapi ||
9238             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9239                 return;
9240
9241         for (i = 0; i < bp->cp_nr_rings; i++) {
9242                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9243
9244                 napi_disable(&bp->bnapi[i]->napi);
9245                 if (bp->bnapi[i]->rx_ring)
9246                         cancel_work_sync(&cpr->dim.work);
9247         }
9248 }
9249
9250 static void bnxt_enable_napi(struct bnxt *bp)
9251 {
9252         int i;
9253
9254         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9255         for (i = 0; i < bp->cp_nr_rings; i++) {
9256                 struct bnxt_napi *bnapi = bp->bnapi[i];
9257                 struct bnxt_cp_ring_info *cpr;
9258
9259                 cpr = &bnapi->cp_ring;
9260                 if (bnapi->in_reset)
9261                         cpr->sw_stats.rx.rx_resets++;
9262                 bnapi->in_reset = false;
9263
9264                 if (bnapi->rx_ring) {
9265                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9266                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9267                 }
9268                 napi_enable(&bnapi->napi);
9269         }
9270 }
9271
9272 void bnxt_tx_disable(struct bnxt *bp)
9273 {
9274         int i;
9275         struct bnxt_tx_ring_info *txr;
9276
9277         if (bp->tx_ring) {
9278                 for (i = 0; i < bp->tx_nr_rings; i++) {
9279                         txr = &bp->tx_ring[i];
9280                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9281                 }
9282         }
9283         /* Make sure napi polls see @dev_state change */
9284         synchronize_net();
9285         /* Drop carrier first to prevent TX timeout */
9286         netif_carrier_off(bp->dev);
9287         /* Stop all TX queues */
9288         netif_tx_disable(bp->dev);
9289 }
9290
9291 void bnxt_tx_enable(struct bnxt *bp)
9292 {
9293         int i;
9294         struct bnxt_tx_ring_info *txr;
9295
9296         for (i = 0; i < bp->tx_nr_rings; i++) {
9297                 txr = &bp->tx_ring[i];
9298                 WRITE_ONCE(txr->dev_state, 0);
9299         }
9300         /* Make sure napi polls see @dev_state change */
9301         synchronize_net();
9302         netif_tx_wake_all_queues(bp->dev);
9303         if (BNXT_LINK_IS_UP(bp))
9304                 netif_carrier_on(bp->dev);
9305 }
9306
9307 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9308 {
9309         u8 active_fec = link_info->active_fec_sig_mode &
9310                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9311
9312         switch (active_fec) {
9313         default:
9314         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9315                 return "None";
9316         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9317                 return "Clause 74 BaseR";
9318         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9319                 return "Clause 91 RS(528,514)";
9320         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9321                 return "Clause 91 RS544_1XN";
9322         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9323                 return "Clause 91 RS(544,514)";
9324         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9325                 return "Clause 91 RS272_1XN";
9326         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9327                 return "Clause 91 RS(272,257)";
9328         }
9329 }
9330
9331 void bnxt_report_link(struct bnxt *bp)
9332 {
9333         if (BNXT_LINK_IS_UP(bp)) {
9334                 const char *signal = "";
9335                 const char *flow_ctrl;
9336                 const char *duplex;
9337                 u32 speed;
9338                 u16 fec;
9339
9340                 netif_carrier_on(bp->dev);
9341                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9342                 if (speed == SPEED_UNKNOWN) {
9343                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9344                         return;
9345                 }
9346                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9347                         duplex = "full";
9348                 else
9349                         duplex = "half";
9350                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9351                         flow_ctrl = "ON - receive & transmit";
9352                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9353                         flow_ctrl = "ON - transmit";
9354                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9355                         flow_ctrl = "ON - receive";
9356                 else
9357                         flow_ctrl = "none";
9358                 if (bp->link_info.phy_qcfg_resp.option_flags &
9359                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9360                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9361                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9362                         switch (sig_mode) {
9363                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9364                                 signal = "(NRZ) ";
9365                                 break;
9366                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9367                                 signal = "(PAM4) ";
9368                                 break;
9369                         default:
9370                                 break;
9371                         }
9372                 }
9373                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9374                             speed, signal, duplex, flow_ctrl);
9375                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9376                         netdev_info(bp->dev, "EEE is %s\n",
9377                                     bp->eee.eee_active ? "active" :
9378                                                          "not active");
9379                 fec = bp->link_info.fec_cfg;
9380                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9381                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9382                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9383                                     bnxt_report_fec(&bp->link_info));
9384         } else {
9385                 netif_carrier_off(bp->dev);
9386                 netdev_err(bp->dev, "NIC Link is Down\n");
9387         }
9388 }
9389
9390 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9391 {
9392         if (!resp->supported_speeds_auto_mode &&
9393             !resp->supported_speeds_force_mode &&
9394             !resp->supported_pam4_speeds_auto_mode &&
9395             !resp->supported_pam4_speeds_force_mode)
9396                 return true;
9397         return false;
9398 }
9399
9400 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9401 {
9402         struct bnxt_link_info *link_info = &bp->link_info;
9403         struct hwrm_port_phy_qcaps_output *resp;
9404         struct hwrm_port_phy_qcaps_input *req;
9405         int rc = 0;
9406
9407         if (bp->hwrm_spec_code < 0x10201)
9408                 return 0;
9409
9410         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9411         if (rc)
9412                 return rc;
9413
9414         resp = hwrm_req_hold(bp, req);
9415         rc = hwrm_req_send(bp, req);
9416         if (rc)
9417                 goto hwrm_phy_qcaps_exit;
9418
9419         bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9420         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9421                 struct ethtool_eee *eee = &bp->eee;
9422                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9423
9424                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9425                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9426                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9427                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9428                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9429         }
9430
9431         if (bp->hwrm_spec_code >= 0x10a01) {
9432                 if (bnxt_phy_qcaps_no_speed(resp)) {
9433                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9434                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9435                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9436                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9437                         netdev_info(bp->dev, "Ethernet link enabled\n");
9438                         /* Phy re-enabled, reprobe the speeds */
9439                         link_info->support_auto_speeds = 0;
9440                         link_info->support_pam4_auto_speeds = 0;
9441                 }
9442         }
9443         if (resp->supported_speeds_auto_mode)
9444                 link_info->support_auto_speeds =
9445                         le16_to_cpu(resp->supported_speeds_auto_mode);
9446         if (resp->supported_pam4_speeds_auto_mode)
9447                 link_info->support_pam4_auto_speeds =
9448                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9449
9450         bp->port_count = resp->port_cnt;
9451
9452 hwrm_phy_qcaps_exit:
9453         hwrm_req_drop(bp, req);
9454         return rc;
9455 }
9456
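/* Return true if @advertising contains any speed bits that @supported no
 * longer carries.  The XOR isolates the differing bits; OR-ing those back
 * into @supported only changes it when @advertising had a bit that
 * @supported lacks.
 */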
9457 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9458 {
9459         u16 diff = advertising ^ supported;
9460
9461         return ((supported | diff) != supported);
9462 }
9463
9464 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9465 {
9466         struct bnxt_link_info *link_info = &bp->link_info;
9467         struct hwrm_port_phy_qcfg_output *resp;
9468         struct hwrm_port_phy_qcfg_input *req;
9469         u8 link_state = link_info->link_state;
9470         bool support_changed = false;
9471         int rc;
9472
9473         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9474         if (rc)
9475                 return rc;
9476
9477         resp = hwrm_req_hold(bp, req);
9478         rc = hwrm_req_send(bp, req);
9479         if (rc) {
9480                 hwrm_req_drop(bp, req);
9481                 if (BNXT_VF(bp) && rc == -ENODEV) {
9482                         netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9483                         rc = 0;
9484                 }
9485                 return rc;
9486         }
9487
9488         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9489         link_info->phy_link_status = resp->link;
9490         link_info->duplex = resp->duplex_cfg;
9491         if (bp->hwrm_spec_code >= 0x10800)
9492                 link_info->duplex = resp->duplex_state;
9493         link_info->pause = resp->pause;
9494         link_info->auto_mode = resp->auto_mode;
9495         link_info->auto_pause_setting = resp->auto_pause;
9496         link_info->lp_pause = resp->link_partner_adv_pause;
9497         link_info->force_pause_setting = resp->force_pause;
9498         link_info->duplex_setting = resp->duplex_cfg;
9499         if (link_info->phy_link_status == BNXT_LINK_LINK)
9500                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9501         else
9502                 link_info->link_speed = 0;
9503         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9504         link_info->force_pam4_link_speed =
9505                 le16_to_cpu(resp->force_pam4_link_speed);
9506         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9507         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9508         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9509         link_info->auto_pam4_link_speeds =
9510                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9511         link_info->lp_auto_link_speeds =
9512                 le16_to_cpu(resp->link_partner_adv_speeds);
9513         link_info->lp_auto_pam4_link_speeds =
9514                 resp->link_partner_pam4_adv_speeds;
9515         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9516         link_info->phy_ver[0] = resp->phy_maj;
9517         link_info->phy_ver[1] = resp->phy_min;
9518         link_info->phy_ver[2] = resp->phy_bld;
9519         link_info->media_type = resp->media_type;
9520         link_info->phy_type = resp->phy_type;
9521         link_info->transceiver = resp->xcvr_pkg_type;
9522         link_info->phy_addr = resp->eee_config_phy_addr &
9523                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9524         link_info->module_status = resp->module_status;
9525
9526         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9527                 struct ethtool_eee *eee = &bp->eee;
9528                 u16 fw_speeds;
9529
9530                 eee->eee_active = 0;
9531                 if (resp->eee_config_phy_addr &
9532                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9533                         eee->eee_active = 1;
9534                         fw_speeds = le16_to_cpu(
9535                                 resp->link_partner_adv_eee_link_speed_mask);
9536                         eee->lp_advertised =
9537                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9538                 }
9539
9540                 /* Pull initial EEE config */
9541                 if (!chng_link_state) {
9542                         if (resp->eee_config_phy_addr &
9543                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9544                                 eee->eee_enabled = 1;
9545
9546                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9547                         eee->advertised =
9548                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9549
9550                         if (resp->eee_config_phy_addr &
9551                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9552                                 __le32 tmr;
9553
9554                                 eee->tx_lpi_enabled = 1;
9555                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9556                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9557                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9558                         }
9559                 }
9560         }
9561
9562         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9563         if (bp->hwrm_spec_code >= 0x10504) {
9564                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9565                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9566         }
9567         /* TODO: need to add more logic to report VF link */
9568         if (chng_link_state) {
9569                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9570                         link_info->link_state = BNXT_LINK_STATE_UP;
9571                 else
9572                         link_info->link_state = BNXT_LINK_STATE_DOWN;
9573                 if (link_state != link_info->link_state)
9574                         bnxt_report_link(bp);
9575         } else {
9576                 /* always report link down if not required to update the link state */
9577                 link_info->link_state = BNXT_LINK_STATE_DOWN;
9578         }
9579         hwrm_req_drop(bp, req);
9580
9581         if (!BNXT_PHY_CFG_ABLE(bp))
9582                 return 0;
9583
9584         /* Check if any advertised speeds are no longer supported. The caller
9585          * holds the link_lock mutex, so we can modify link_info settings.
9586          */
9587         if (bnxt_support_dropped(link_info->advertising,
9588                                  link_info->support_auto_speeds)) {
9589                 link_info->advertising = link_info->support_auto_speeds;
9590                 support_changed = true;
9591         }
9592         if (bnxt_support_dropped(link_info->advertising_pam4,
9593                                  link_info->support_pam4_auto_speeds)) {
9594                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9595                 support_changed = true;
9596         }
9597         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9598                 bnxt_hwrm_set_link_setting(bp, true, false);
9599         return 0;
9600 }
9601
9602 static void bnxt_get_port_module_status(struct bnxt *bp)
9603 {
9604         struct bnxt_link_info *link_info = &bp->link_info;
9605         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9606         u8 module_status;
9607
9608         if (bnxt_update_link(bp, true))
9609                 return;
9610
9611         module_status = link_info->module_status;
9612         switch (module_status) {
9613         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9614         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9615         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9616                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9617                             bp->pf.port_id);
9618                 if (bp->hwrm_spec_code >= 0x10201) {
9619                         netdev_warn(bp->dev, "Module part number %s\n",
9620                                     resp->phy_vendor_partnumber);
9621                 }
9622                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9623                         netdev_warn(bp->dev, "TX is disabled\n");
9624                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9625                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9626         }
9627 }
9628
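/* Fill in the flow-control fields of a PORT_PHY_CFG request based on the
 * requested RX/TX pause settings, using the autoneg pause fields when
 * flow-control autonegotiation is enabled and forced pause otherwise.
 */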
9629 static void
9630 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9631 {
9632         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9633                 if (bp->hwrm_spec_code >= 0x10201)
9634                         req->auto_pause =
9635                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9636                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9637                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9638                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9639                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9640                 req->enables |=
9641                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9642         } else {
9643                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9644                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9645                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9646                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9647                 req->enables |=
9648                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9649                 if (bp->hwrm_spec_code >= 0x10201) {
9650                         req->auto_pause = req->force_pause;
9651                         req->enables |= cpu_to_le32(
9652                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9653                 }
9654         }
9655 }
9656
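/* Fill in the speed fields of a PORT_PHY_CFG request: advertise the NRZ and
 * PAM4 speed masks and restart autoneg when speed autonegotiation is enabled,
 * otherwise force the requested link speed in the requested signal mode.
 */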
9657 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9658 {
9659         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9660                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9661                 if (bp->link_info.advertising) {
9662                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9663                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9664                 }
9665                 if (bp->link_info.advertising_pam4) {
9666                         req->enables |=
9667                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9668                         req->auto_link_pam4_speed_mask =
9669                                 cpu_to_le16(bp->link_info.advertising_pam4);
9670                 }
9671                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9672                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9673         } else {
9674                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9675                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9676                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9677                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9678                 } else {
9679                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9680                 }
9681         }
9682
9683         /* tell chimp that the setting takes effect immediately */
9684         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9685 }
9686
9687 int bnxt_hwrm_set_pause(struct bnxt *bp)
9688 {
9689         struct hwrm_port_phy_cfg_input *req;
9690         int rc;
9691
9692         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9693         if (rc)
9694                 return rc;
9695
9696         bnxt_hwrm_set_pause_common(bp, req);
9697
9698         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9699             bp->link_info.force_link_chng)
9700                 bnxt_hwrm_set_link_common(bp, req);
9701
9702         rc = hwrm_req_send(bp, req);
9703         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9704                 /* Since changing the pause setting doesn't trigger any link
9705                  * change event, the driver needs to update the current pause
9706                  * result upon successful return of the phy_cfg command.
9707                  */
9708                 bp->link_info.pause =
9709                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9710                 bp->link_info.auto_pause_setting = 0;
9711                 if (!bp->link_info.force_link_chng)
9712                         bnxt_report_link(bp);
9713         }
9714         bp->link_info.force_link_chng = false;
9715         return rc;
9716 }
9717
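/* Program the EEE fields of a PORT_PHY_CFG request: enable or disable EEE
 * and, when enabled, set the advertised EEE speeds, TX LPI state and timer.
 */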
9718 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9719                               struct hwrm_port_phy_cfg_input *req)
9720 {
9721         struct ethtool_eee *eee = &bp->eee;
9722
9723         if (eee->eee_enabled) {
9724                 u16 eee_speeds;
9725                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9726
9727                 if (eee->tx_lpi_enabled)
9728                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9729                 else
9730                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9731
9732                 req->flags |= cpu_to_le32(flags);
9733                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9734                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9735                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9736         } else {
9737                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9738         }
9739 }
9740
9741 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9742 {
9743         struct hwrm_port_phy_cfg_input *req;
9744         int rc;
9745
9746         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9747         if (rc)
9748                 return rc;
9749
9750         if (set_pause)
9751                 bnxt_hwrm_set_pause_common(bp, req);
9752
9753         bnxt_hwrm_set_link_common(bp, req);
9754
9755         if (set_eee)
9756                 bnxt_hwrm_set_eee(bp, req);
9757         return hwrm_req_send(bp, req);
9758 }
9759
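/* Force the link down when the interface is closed.  This is skipped unless
 * the device is a single PF, and also skipped when VFs are active unless the
 * firmware manages link-down on behalf of the PF.
 */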
9760 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9761 {
9762         struct hwrm_port_phy_cfg_input *req;
9763         int rc;
9764
9765         if (!BNXT_SINGLE_PF(bp))
9766                 return 0;
9767
9768         if (pci_num_vf(bp->pdev) &&
9769             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9770                 return 0;
9771
9772         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9773         if (rc)
9774                 return rc;
9775
9776         req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9777         rc = hwrm_req_send(bp, req);
9778         if (!rc) {
9779                 mutex_lock(&bp->link_lock);
9780                 /* The device is not obliged to bring the link down in certain
9781                  * scenarios, even when forced. Setting the state to unknown is
9782                  * consistent with driver startup and forces the link state to
9783                  * be reported during the subsequent open based on PORT_PHY_QCFG.
9784                  */
9785                 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9786                 mutex_unlock(&bp->link_lock);
9787         }
9788         return rc;
9789 }
9790
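/* Request a firmware reload through OP-TEE; only supported when the kernel is
 * built with CONFIG_TEE_BNXT_FW.
 */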
9791 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9792 {
9793 #ifdef CONFIG_TEE_BNXT_FW
9794         int rc = tee_bnxt_fw_load();
9795
9796         if (rc)
9797                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9798
9799         return rc;
9800 #else
9801         netdev_err(bp->dev, "OP-TEE not supported\n");
9802         return -ENODEV;
9803 #endif
9804 }
9805
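/* Poll the firmware health register until firmware is no longer booting or
 * recovering.  If firmware crashed and no master function remains, fall back
 * to resetting it via OP-TEE.
 */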
9806 static int bnxt_try_recover_fw(struct bnxt *bp)
9807 {
9808         if (bp->fw_health && bp->fw_health->status_reliable) {
9809                 int retry = 0, rc;
9810                 u32 sts;
9811
9812                 do {
9813                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9814                         rc = bnxt_hwrm_poll(bp);
9815                         if (!BNXT_FW_IS_BOOTING(sts) &&
9816                             !BNXT_FW_IS_RECOVERING(sts))
9817                                 break;
9818                         retry++;
9819                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9820
9821                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9822                         netdev_err(bp->dev,
9823                                    "Firmware not responding, status: 0x%x\n",
9824                                    sts);
9825                         rc = -ENODEV;
9826                 }
9827                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9828                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9829                         return bnxt_fw_reset_via_optee(bp);
9830                 }
9831                 return rc;
9832         }
9833
9834         return -ENODEV;
9835 }
9836
9837 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
9838 {
9839         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9840         int rc;
9841
9842         if (!BNXT_NEW_RM(bp))
9843                 return 0; /* no resource reservations required */
9844
9845         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9846         if (rc)
9847                 netdev_err(bp->dev, "resc_qcaps failed\n");
9848
9849         hw_resc->resv_cp_rings = 0;
9850         hw_resc->resv_stat_ctxs = 0;
9851         hw_resc->resv_irqs = 0;
9852         hw_resc->resv_tx_rings = 0;
9853         hw_resc->resv_rx_rings = 0;
9854         hw_resc->resv_hw_ring_grps = 0;
9855         hw_resc->resv_vnics = 0;
9856         if (!fw_reset) {
9857                 bp->tx_nr_rings = 0;
9858                 bp->rx_nr_rings = 0;
9859         }
9860
9861         return rc;
9862 }
9863
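/* Notify firmware of a driver interface state change (up or down) and act on
 * the response: re-initialize firmware state after a hot reset and cancel
 * resource reservations when resources may have changed.
 */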
9864 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9865 {
9866         struct hwrm_func_drv_if_change_output *resp;
9867         struct hwrm_func_drv_if_change_input *req;
9868         bool fw_reset = !bp->irq_tbl;
9869         bool resc_reinit = false;
9870         int rc, retry = 0;
9871         u32 flags = 0;
9872
9873         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9874                 return 0;
9875
9876         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9877         if (rc)
9878                 return rc;
9879
9880         if (up)
9881                 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9882         resp = hwrm_req_hold(bp, req);
9883
9884         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9885         while (retry < BNXT_FW_IF_RETRY) {
9886                 rc = hwrm_req_send(bp, req);
9887                 if (rc != -EAGAIN)
9888                         break;
9889
9890                 msleep(50);
9891                 retry++;
9892         }
9893
9894         if (rc == -EAGAIN) {
9895                 hwrm_req_drop(bp, req);
9896                 return rc;
9897         } else if (!rc) {
9898                 flags = le32_to_cpu(resp->flags);
9899         } else if (up) {
9900                 rc = bnxt_try_recover_fw(bp);
9901                 fw_reset = true;
9902         }
9903         hwrm_req_drop(bp, req);
9904         if (rc)
9905                 return rc;
9906
9907         if (!up) {
9908                 bnxt_inv_fw_health_reg(bp);
9909                 return 0;
9910         }
9911
9912         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9913                 resc_reinit = true;
9914         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9915                 fw_reset = true;
9916         else
9917                 bnxt_remap_fw_health_regs(bp);
9918
9919         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9920                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9921                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9922                 return -ENODEV;
9923         }
9924         if (resc_reinit || fw_reset) {
9925                 if (fw_reset) {
9926                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9927                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9928                                 bnxt_ulp_stop(bp);
9929                         bnxt_free_ctx_mem(bp);
9930                         kfree(bp->ctx);
9931                         bp->ctx = NULL;
9932                         bnxt_dcb_free(bp);
9933                         rc = bnxt_fw_init_one(bp);
9934                         if (rc) {
9935                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9936                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9937                                 return rc;
9938                         }
9939                         bnxt_clear_int_mode(bp);
9940                         rc = bnxt_init_int_mode(bp);
9941                         if (rc) {
9942                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9943                                 netdev_err(bp->dev, "init int mode failed\n");
9944                                 return rc;
9945                         }
9946                 }
9947                 rc = bnxt_cancel_reservations(bp, fw_reset);
9948         }
9949         return rc;
9950 }
9951
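/* Query the port's LED capabilities and cache them.  The LEDs are only used
 * if every LED reports a group ID and supports alternate blinking.
 */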
9952 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9953 {
9954         struct hwrm_port_led_qcaps_output *resp;
9955         struct hwrm_port_led_qcaps_input *req;
9956         struct bnxt_pf_info *pf = &bp->pf;
9957         int rc;
9958
9959         bp->num_leds = 0;
9960         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9961                 return 0;
9962
9963         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9964         if (rc)
9965                 return rc;
9966
9967         req->port_id = cpu_to_le16(pf->port_id);
9968         resp = hwrm_req_hold(bp, req);
9969         rc = hwrm_req_send(bp, req);
9970         if (rc) {
9971                 hwrm_req_drop(bp, req);
9972                 return rc;
9973         }
9974         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9975                 int i;
9976
9977                 bp->num_leds = resp->num_leds;
9978                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9979                                                  bp->num_leds);
9980                 for (i = 0; i < bp->num_leds; i++) {
9981                         struct bnxt_led_info *led = &bp->leds[i];
9982                         __le16 caps = led->led_state_caps;
9983
9984                         if (!led->led_group_id ||
9985                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9986                                 bp->num_leds = 0;
9987                                 break;
9988                         }
9989                 }
9990         }
9991         hwrm_req_drop(bp, req);
9992         return 0;
9993 }
9994
9995 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9996 {
9997         struct hwrm_wol_filter_alloc_output *resp;
9998         struct hwrm_wol_filter_alloc_input *req;
9999         int rc;
10000
10001         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10002         if (rc)
10003                 return rc;
10004
10005         req->port_id = cpu_to_le16(bp->pf.port_id);
10006         req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10007         req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10008         memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10009
10010         resp = hwrm_req_hold(bp, req);
10011         rc = hwrm_req_send(bp, req);
10012         if (!rc)
10013                 bp->wol_filter_id = resp->wol_filter_id;
10014         hwrm_req_drop(bp, req);
10015         return rc;
10016 }
10017
10018 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10019 {
10020         struct hwrm_wol_filter_free_input *req;
10021         int rc;
10022
10023         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10024         if (rc)
10025                 return rc;
10026
10027         req->port_id = cpu_to_le16(bp->pf.port_id);
10028         req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10029         req->wol_filter_id = bp->wol_filter_id;
10030
10031         return hwrm_req_send(bp, req);
10032 }
10033
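/* Query one WoL filter by handle.  Records the magic-packet filter ID if one
 * is configured and returns the next handle for iteration.
 */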
10034 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10035 {
10036         struct hwrm_wol_filter_qcfg_output *resp;
10037         struct hwrm_wol_filter_qcfg_input *req;
10038         u16 next_handle = 0;
10039         int rc;
10040
10041         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10042         if (rc)
10043                 return rc;
10044
10045         req->port_id = cpu_to_le16(bp->pf.port_id);
10046         req->handle = cpu_to_le16(handle);
10047         resp = hwrm_req_hold(bp, req);
10048         rc = hwrm_req_send(bp, req);
10049         if (!rc) {
10050                 next_handle = le16_to_cpu(resp->next_handle);
10051                 if (next_handle != 0) {
10052                         if (resp->wol_type ==
10053                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10054                                 bp->wol = 1;
10055                                 bp->wol_filter_id = resp->wol_filter_id;
10056                         }
10057                 }
10058         }
10059         hwrm_req_drop(bp, req);
10060         return next_handle;
10061 }
10062
10063 static void bnxt_get_wol_settings(struct bnxt *bp)
10064 {
10065         u16 handle = 0;
10066
10067         bp->wol = 0;
10068         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10069                 return;
10070
10071         do {
10072                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10073         } while (handle && handle != 0xffff);
10074 }
10075
10076 #ifdef CONFIG_BNXT_HWMON
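/* hwmon callback: read the chip temperature via HWRM_TEMP_MONITOR_QUERY and
 * report it in millidegrees Celsius.
 */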
10077 static ssize_t bnxt_show_temp(struct device *dev,
10078                               struct device_attribute *devattr, char *buf)
10079 {
10080         struct hwrm_temp_monitor_query_output *resp;
10081         struct hwrm_temp_monitor_query_input *req;
10082         struct bnxt *bp = dev_get_drvdata(dev);
10083         u32 len = 0;
10084         int rc;
10085
10086         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10087         if (rc)
10088                 return rc;
10089         resp = hwrm_req_hold(bp, req);
10090         rc = hwrm_req_send(bp, req);
10091         if (!rc)
10092                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegrees */
10093         hwrm_req_drop(bp, req);
10094         if (rc)
10095                 return rc;
10096         return len;
10097 }
10098 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10099
10100 static struct attribute *bnxt_attrs[] = {
10101         &sensor_dev_attr_temp1_input.dev_attr.attr,
10102         NULL
10103 };
10104 ATTRIBUTE_GROUPS(bnxt);
10105
10106 static void bnxt_hwmon_close(struct bnxt *bp)
10107 {
10108         if (bp->hwmon_dev) {
10109                 hwmon_device_unregister(bp->hwmon_dev);
10110                 bp->hwmon_dev = NULL;
10111         }
10112 }
10113
10114 static void bnxt_hwmon_open(struct bnxt *bp)
10115 {
10116         struct hwrm_temp_monitor_query_input *req;
10117         struct pci_dev *pdev = bp->pdev;
10118         int rc;
10119
10120         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10121         if (!rc)
10122                 rc = hwrm_req_send_silent(bp, req);
10123         if (rc == -EACCES || rc == -EOPNOTSUPP) {
10124                 bnxt_hwmon_close(bp);
10125                 return;
10126         }
10127
10128         if (bp->hwmon_dev)
10129                 return;
10130
10131         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10132                                                           DRV_MODULE_NAME, bp,
10133                                                           bnxt_groups);
10134         if (IS_ERR(bp->hwmon_dev)) {
10135                 bp->hwmon_dev = NULL;
10136                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10137         }
10138 }
10139 #else
10140 static void bnxt_hwmon_close(struct bnxt *bp)
10141 {
10142 }
10143
10144 static void bnxt_hwmon_open(struct bnxt *bp)
10145 {
10146 }
10147 #endif
10148
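/* Validate the EEE configuration against the current autoneg settings.
 * Returns false (and fixes up the EEE state) if EEE is enabled without speed
 * autoneg or if speeds are advertised for EEE but not for the link.
 */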
10149 static bool bnxt_eee_config_ok(struct bnxt *bp)
10150 {
10151         struct ethtool_eee *eee = &bp->eee;
10152         struct bnxt_link_info *link_info = &bp->link_info;
10153
10154         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10155                 return true;
10156
10157         if (eee->eee_enabled) {
10158                 u32 advertising =
10159                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10160
10161                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10162                         eee->eee_enabled = 0;
10163                         return false;
10164                 }
10165                 if (eee->advertised & ~advertising) {
10166                         eee->advertised = advertising & eee->supported;
10167                         return false;
10168                 }
10169         }
10170         return true;
10171 }
10172
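/* Refresh the link state from firmware and, for a single PF, reapply pause,
 * speed and EEE settings when the requested configuration no longer matches
 * what is currently programmed.
 */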
10173 static int bnxt_update_phy_setting(struct bnxt *bp)
10174 {
10175         int rc;
10176         bool update_link = false;
10177         bool update_pause = false;
10178         bool update_eee = false;
10179         struct bnxt_link_info *link_info = &bp->link_info;
10180
10181         rc = bnxt_update_link(bp, true);
10182         if (rc) {
10183                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10184                            rc);
10185                 return rc;
10186         }
10187         if (!BNXT_SINGLE_PF(bp))
10188                 return 0;
10189
10190         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10191             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10192             link_info->req_flow_ctrl)
10193                 update_pause = true;
10194         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10195             link_info->force_pause_setting != link_info->req_flow_ctrl)
10196                 update_pause = true;
10197         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10198                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10199                         update_link = true;
10200                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10201                     link_info->req_link_speed != link_info->force_link_speed)
10202                         update_link = true;
10203                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10204                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10205                         update_link = true;
10206                 if (link_info->req_duplex != link_info->duplex_setting)
10207                         update_link = true;
10208         } else {
10209                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10210                         update_link = true;
10211                 if (link_info->advertising != link_info->auto_link_speeds ||
10212                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10213                         update_link = true;
10214         }
10215
10216         /* The last close may have shut down the link, so we need to call
10217          * PHY_CFG to bring it back up.
10218          */
10219         if (!BNXT_LINK_IS_UP(bp))
10220                 update_link = true;
10221
10222         if (!bnxt_eee_config_ok(bp))
10223                 update_eee = true;
10224
10225         if (update_link)
10226                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10227         else if (update_pause)
10228                 rc = bnxt_hwrm_set_pause(bp);
10229         if (rc) {
10230                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10231                            rc);
10232                 return rc;
10233         }
10234
10235         return rc;
10236 }
10237
10238 /* Common routine to pre-map certain register blocks to different GRC windows.
10239  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10240  * in the PF and 3 windows in the VF can be customized to map different
10241  * register blocks.
10242  */
10243 static void bnxt_preset_reg_win(struct bnxt *bp)
10244 {
10245         if (BNXT_PF(bp)) {
10246                 /* CAG registers map to GRC window #4 */
10247                 writel(BNXT_CAG_REG_BASE,
10248                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10249         }
10250 }
10251
10252 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10253
10254 static int bnxt_reinit_after_abort(struct bnxt *bp)
10255 {
10256         int rc;
10257
10258         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10259                 return -EBUSY;
10260
10261         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10262                 return -ENODEV;
10263
10264         rc = bnxt_fw_init_one(bp);
10265         if (!rc) {
10266                 bnxt_clear_int_mode(bp);
10267                 rc = bnxt_init_int_mode(bp);
10268                 if (!rc) {
10269                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10270                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10271                 }
10272         }
10273         return rc;
10274 }
10275
10276 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10277 {
10278         int rc = 0;
10279
10280         bnxt_preset_reg_win(bp);
10281         netif_carrier_off(bp->dev);
10282         if (irq_re_init) {
10283                 /* Reserve rings now if none were reserved at driver probe. */
10284                 rc = bnxt_init_dflt_ring_mode(bp);
10285                 if (rc) {
10286                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10287                         return rc;
10288                 }
10289         }
10290         rc = bnxt_reserve_rings(bp, irq_re_init);
10291         if (rc)
10292                 return rc;
10293         if ((bp->flags & BNXT_FLAG_RFS) &&
10294             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10295                 /* disable RFS if falling back to INTA */
10296                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10297                 bp->flags &= ~BNXT_FLAG_RFS;
10298         }
10299
10300         rc = bnxt_alloc_mem(bp, irq_re_init);
10301         if (rc) {
10302                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10303                 goto open_err_free_mem;
10304         }
10305
10306         if (irq_re_init) {
10307                 bnxt_init_napi(bp);
10308                 rc = bnxt_request_irq(bp);
10309                 if (rc) {
10310                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10311                         goto open_err_irq;
10312                 }
10313         }
10314
10315         rc = bnxt_init_nic(bp, irq_re_init);
10316         if (rc) {
10317                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10318                 goto open_err_irq;
10319         }
10320
10321         bnxt_enable_napi(bp);
10322         bnxt_debug_dev_init(bp);
10323
10324         if (link_re_init) {
10325                 mutex_lock(&bp->link_lock);
10326                 rc = bnxt_update_phy_setting(bp);
10327                 mutex_unlock(&bp->link_lock);
10328                 if (rc) {
10329                         netdev_warn(bp->dev, "failed to update phy settings\n");
10330                         if (BNXT_SINGLE_PF(bp)) {
10331                                 bp->link_info.phy_retry = true;
10332                                 bp->link_info.phy_retry_expires =
10333                                         jiffies + 5 * HZ;
10334                         }
10335                 }
10336         }
10337
10338         if (irq_re_init)
10339                 udp_tunnel_nic_reset_ntf(bp->dev);
10340
10341         set_bit(BNXT_STATE_OPEN, &bp->state);
10342         bnxt_enable_int(bp);
10343         /* Enable TX queues */
10344         bnxt_tx_enable(bp);
10345         mod_timer(&bp->timer, jiffies + bp->current_interval);
10346         /* Poll link status and check SFP+ module status */
10347         mutex_lock(&bp->link_lock);
10348         bnxt_get_port_module_status(bp);
10349         mutex_unlock(&bp->link_lock);
10350
10351         /* VF-reps may need to be re-opened after the PF is re-opened */
10352         if (BNXT_PF(bp))
10353                 bnxt_vf_reps_open(bp);
10354         bnxt_ptp_init_rtc(bp, true);
10355         return 0;
10356
10357 open_err_irq:
10358         bnxt_del_napi(bp);
10359
10360 open_err_free_mem:
10361         bnxt_free_skbs(bp);
10362         bnxt_free_irq(bp);
10363         bnxt_free_mem(bp, true);
10364         return rc;
10365 }
10366
10367 /* rtnl_lock held */
10368 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10369 {
10370         int rc = 0;
10371
10372         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10373                 rc = -EIO;
10374         if (!rc)
10375                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10376         if (rc) {
10377                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10378                 dev_close(bp->dev);
10379         }
10380         return rc;
10381 }
10382
10383 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10384  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10385  * self-tests.
10386  */
10387 int bnxt_half_open_nic(struct bnxt *bp)
10388 {
10389         int rc = 0;
10390
10391         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10392                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10393                 rc = -ENODEV;
10394                 goto half_open_err;
10395         }
10396
10397         rc = bnxt_alloc_mem(bp, true);
10398         if (rc) {
10399                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10400                 goto half_open_err;
10401         }
10402         set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10403         rc = bnxt_init_nic(bp, true);
10404         if (rc) {
10405                 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10406                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10407                 goto half_open_err;
10408         }
10409         return 0;
10410
10411 half_open_err:
10412         bnxt_free_skbs(bp);
10413         bnxt_free_mem(bp, true);
10414         dev_close(bp->dev);
10415         return rc;
10416 }
10417
10418 /* rtnl_lock held, this call can only be made after a previous successful
10419  * call to bnxt_half_open_nic().
10420  */
10421 void bnxt_half_close_nic(struct bnxt *bp)
10422 {
10423         bnxt_hwrm_resource_free(bp, false, true);
10424         bnxt_free_skbs(bp);
10425         bnxt_free_mem(bp, true);
10426         clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10427 }
10428
10429 void bnxt_reenable_sriov(struct bnxt *bp)
10430 {
10431         if (BNXT_PF(bp)) {
10432                 struct bnxt_pf_info *pf = &bp->pf;
10433                 int n = pf->active_vfs;
10434
10435                 if (n)
10436                         bnxt_cfg_hw_sriov(bp, &n, true);
10437         }
10438 }
10439
10440 static int bnxt_open(struct net_device *dev)
10441 {
10442         struct bnxt *bp = netdev_priv(dev);
10443         int rc;
10444
10445         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10446                 rc = bnxt_reinit_after_abort(bp);
10447                 if (rc) {
10448                         if (rc == -EBUSY)
10449                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10450                         else
10451                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10452                         return -ENODEV;
10453                 }
10454         }
10455
10456         rc = bnxt_hwrm_if_change(bp, true);
10457         if (rc)
10458                 return rc;
10459
10460         rc = __bnxt_open_nic(bp, true, true);
10461         if (rc) {
10462                 bnxt_hwrm_if_change(bp, false);
10463         } else {
10464                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10465                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10466                                 bnxt_ulp_start(bp, 0);
10467                                 bnxt_reenable_sriov(bp);
10468                         }
10469                 }
10470                 bnxt_hwmon_open(bp);
10471         }
10472
10473         return rc;
10474 }
10475
10476 static bool bnxt_drv_busy(struct bnxt *bp)
10477 {
10478         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10479                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10480 }
10481
10482 static void bnxt_get_ring_stats(struct bnxt *bp,
10483                                 struct rtnl_link_stats64 *stats);
10484
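/* Tear down the NIC: stop TX, wait for in-flight slow-path and stats readers,
 * shut down the rings and interrupts, free SKBs, save the ring stats and,
 * when irq_re_init is set, release IRQs, NAPI and memory.
 */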
10485 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10486                              bool link_re_init)
10487 {
10488         /* Close the VF-reps before closing PF */
10489         if (BNXT_PF(bp))
10490                 bnxt_vf_reps_close(bp);
10491
10492         /* Change device state to avoid TX queue wake-ups */
10493         bnxt_tx_disable(bp);
10494
10495         clear_bit(BNXT_STATE_OPEN, &bp->state);
10496         smp_mb__after_atomic();
10497         while (bnxt_drv_busy(bp))
10498                 msleep(20);
10499
10500         /* Flush rings and disable interrupts */
10501         bnxt_shutdown_nic(bp, irq_re_init);
10502
10503         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10504
10505         bnxt_debug_dev_exit(bp);
10506         bnxt_disable_napi(bp);
10507         del_timer_sync(&bp->timer);
10508         bnxt_free_skbs(bp);
10509
10510         /* Save ring stats before shutdown */
10511         if (bp->bnapi && irq_re_init)
10512                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10513         if (irq_re_init) {
10514                 bnxt_free_irq(bp);
10515                 bnxt_del_napi(bp);
10516         }
10517         bnxt_free_mem(bp, irq_re_init);
10518 }
10519
10520 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10521 {
10522         int rc = 0;
10523
10524         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10525                 /* If we get here, it means firmware reset is in progress
10526                  * while we are trying to close.  We can safely proceed with
10527                  * the close because we are holding rtnl_lock().  Some firmware
10528                  * messages may fail as we proceed to close.  We set the
10529                  * ABORT_ERR flag here so that the FW reset thread will later
10530                  * abort when it gets the rtnl_lock() and sees the flag.
10531                  */
10532                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10533                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10534         }
10535
10536 #ifdef CONFIG_BNXT_SRIOV
10537         if (bp->sriov_cfg) {
10538                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10539                                                       !bp->sriov_cfg,
10540                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10541                 if (rc)
10542                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10543         }
10544 #endif
10545         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10546         return rc;
10547 }
10548
10549 static int bnxt_close(struct net_device *dev)
10550 {
10551         struct bnxt *bp = netdev_priv(dev);
10552
10553         bnxt_hwmon_close(bp);
10554         bnxt_close_nic(bp, true, true);
10555         bnxt_hwrm_shutdown_link(bp);
10556         bnxt_hwrm_if_change(bp, false);
10557         return 0;
10558 }
10559
10560 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10561                                    u16 *val)
10562 {
10563         struct hwrm_port_phy_mdio_read_output *resp;
10564         struct hwrm_port_phy_mdio_read_input *req;
10565         int rc;
10566
10567         if (bp->hwrm_spec_code < 0x10a00)
10568                 return -EOPNOTSUPP;
10569
10570         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10571         if (rc)
10572                 return rc;
10573
10574         req->port_id = cpu_to_le16(bp->pf.port_id);
10575         req->phy_addr = phy_addr;
10576         req->reg_addr = cpu_to_le16(reg & 0x1f);
10577         if (mdio_phy_id_is_c45(phy_addr)) {
10578                 req->cl45_mdio = 1;
10579                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10580                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10581                 req->reg_addr = cpu_to_le16(reg);
10582         }
10583
10584         resp = hwrm_req_hold(bp, req);
10585         rc = hwrm_req_send(bp, req);
10586         if (!rc)
10587                 *val = le16_to_cpu(resp->reg_data);
10588         hwrm_req_drop(bp, req);
10589         return rc;
10590 }
10591
10592 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10593                                     u16 val)
10594 {
10595         struct hwrm_port_phy_mdio_write_input *req;
10596         int rc;
10597
10598         if (bp->hwrm_spec_code < 0x10a00)
10599                 return -EOPNOTSUPP;
10600
10601         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10602         if (rc)
10603                 return rc;
10604
10605         req->port_id = cpu_to_le16(bp->pf.port_id);
10606         req->phy_addr = phy_addr;
10607         req->reg_addr = cpu_to_le16(reg & 0x1f);
10608         if (mdio_phy_id_is_c45(phy_addr)) {
10609                 req->cl45_mdio = 1;
10610                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10611                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10612                 req->reg_addr = cpu_to_le16(reg);
10613         }
10614         req->reg_data = cpu_to_le16(val);
10615
10616         return hwrm_req_send(bp, req);
10617 }
10618
10619 /* rtnl_lock held */
10620 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10621 {
10622         struct mii_ioctl_data *mdio = if_mii(ifr);
10623         struct bnxt *bp = netdev_priv(dev);
10624         int rc;
10625
10626         switch (cmd) {
10627         case SIOCGMIIPHY:
10628                 mdio->phy_id = bp->link_info.phy_addr;
10629
10630                 fallthrough;
10631         case SIOCGMIIREG: {
10632                 u16 mii_regval = 0;
10633
10634                 if (!netif_running(dev))
10635                         return -EAGAIN;
10636
10637                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10638                                              &mii_regval);
10639                 mdio->val_out = mii_regval;
10640                 return rc;
10641         }
10642
10643         case SIOCSMIIREG:
10644                 if (!netif_running(dev))
10645                         return -EAGAIN;
10646
10647                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10648                                                 mdio->val_in);
10649
10650         case SIOCSHWTSTAMP:
10651                 return bnxt_hwtstamp_set(dev, ifr);
10652
10653         case SIOCGHWTSTAMP:
10654                 return bnxt_hwtstamp_get(dev, ifr);
10655
10656         default:
10657                 /* do nothing */
10658                 break;
10659         }
10660         return -EOPNOTSUPP;
10661 }
10662
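/* Aggregate the per-completion-ring software counters into a
 * rtnl_link_stats64 structure.
 */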
10663 static void bnxt_get_ring_stats(struct bnxt *bp,
10664                                 struct rtnl_link_stats64 *stats)
10665 {
10666         int i;
10667
10668         for (i = 0; i < bp->cp_nr_rings; i++) {
10669                 struct bnxt_napi *bnapi = bp->bnapi[i];
10670                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10671                 u64 *sw = cpr->stats.sw_stats;
10672
10673                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10674                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10675                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10676
10677                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10678                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10679                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10680
10681                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10682                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10683                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10684
10685                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10686                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10687                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10688
10689                 stats->rx_missed_errors +=
10690                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10691
10692                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10693
10694                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10695
10696                 stats->rx_dropped +=
10697                         cpr->sw_stats.rx.rx_netpoll_discards +
10698                         cpr->sw_stats.rx.rx_oom_discards;
10699         }
10700 }
10701
10702 static void bnxt_add_prev_stats(struct bnxt *bp,
10703                                 struct rtnl_link_stats64 *stats)
10704 {
10705         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10706
10707         stats->rx_packets += prev_stats->rx_packets;
10708         stats->tx_packets += prev_stats->tx_packets;
10709         stats->rx_bytes += prev_stats->rx_bytes;
10710         stats->tx_bytes += prev_stats->tx_bytes;
10711         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10712         stats->multicast += prev_stats->multicast;
10713         stats->rx_dropped += prev_stats->rx_dropped;
10714         stats->tx_dropped += prev_stats->tx_dropped;
10715 }
10716
10717 static void
10718 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10719 {
10720         struct bnxt *bp = netdev_priv(dev);
10721
10722         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10723         /* Make sure bnxt_close_nic() sees that we are reading stats before
10724          * we check the BNXT_STATE_OPEN flag.
10725          */
10726         smp_mb__after_atomic();
10727         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10728                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10729                 *stats = bp->net_stats_prev;
10730                 return;
10731         }
10732
10733         bnxt_get_ring_stats(bp, stats);
10734         bnxt_add_prev_stats(bp, stats);
10735
10736         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10737                 u64 *rx = bp->port_stats.sw_stats;
10738                 u64 *tx = bp->port_stats.sw_stats +
10739                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10740
10741                 stats->rx_crc_errors =
10742                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10743                 stats->rx_frame_errors =
10744                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10745                 stats->rx_length_errors =
10746                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10747                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10748                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10749                 stats->rx_errors =
10750                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10751                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10752                 stats->collisions =
10753                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10754                 stats->tx_fifo_errors =
10755                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10756                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10757         }
10758         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10759 }
10760
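/* Copy the netdev multicast list into the vnic's mc_list.  Falls back to
 * ALL_MCAST (and returns false) if there are too many addresses; otherwise
 * returns true if the cached list changed.
 */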
10761 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10762 {
10763         struct net_device *dev = bp->dev;
10764         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10765         struct netdev_hw_addr *ha;
10766         u8 *haddr;
10767         int mc_count = 0;
10768         bool update = false;
10769         int off = 0;
10770
10771         netdev_for_each_mc_addr(ha, dev) {
10772                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10773                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10774                         vnic->mc_list_count = 0;
10775                         return false;
10776                 }
10777                 haddr = ha->addr;
10778                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10779                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10780                         update = true;
10781                 }
10782                 off += ETH_ALEN;
10783                 mc_count++;
10784         }
10785         if (mc_count)
10786                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10787
10788         if (mc_count != vnic->mc_list_count) {
10789                 vnic->mc_list_count = mc_count;
10790                 update = true;
10791         }
10792         return update;
10793 }
10794
10795 static bool bnxt_uc_list_updated(struct bnxt *bp)
10796 {
10797         struct net_device *dev = bp->dev;
10798         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10799         struct netdev_hw_addr *ha;
10800         int off = 0;
10801
10802         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10803                 return true;
10804
10805         netdev_for_each_uc_addr(ha, dev) {
10806                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10807                         return true;
10808
10809                 off += ETH_ALEN;
10810         }
10811         return false;
10812 }
10813
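/* ndo_set_rx_mode hook: recompute the vnic RX mask from the device flags and
 * address lists, and schedule the slow-path task if anything changed.
 */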
10814 static void bnxt_set_rx_mode(struct net_device *dev)
10815 {
10816         struct bnxt *bp = netdev_priv(dev);
10817         struct bnxt_vnic_info *vnic;
10818         bool mc_update = false;
10819         bool uc_update;
10820         u32 mask;
10821
10822         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10823                 return;
10824
10825         vnic = &bp->vnic_info[0];
10826         mask = vnic->rx_mask;
10827         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10828                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10829                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10830                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10831
10832         if (dev->flags & IFF_PROMISC)
10833                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10834
10835         uc_update = bnxt_uc_list_updated(bp);
10836
10837         if (dev->flags & IFF_BROADCAST)
10838                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10839         if (dev->flags & IFF_ALLMULTI) {
10840                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10841                 vnic->mc_list_count = 0;
10842         } else if (dev->flags & IFF_MULTICAST) {
10843                 mc_update = bnxt_mc_list_updated(bp, &mask);
10844         }
10845
10846         if (mask != vnic->rx_mask || uc_update || mc_update) {
10847                 vnic->rx_mask = mask;
10848
10849                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10850                 bnxt_queue_sp_work(bp);
10851         }
10852 }
10853
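/* Apply the RX mode computed by bnxt_set_rx_mode(): re-program the unicast L2
 * filters if the UC list changed, then update the vnic RX mask, falling back
 * to ALL_MCAST if the multicast filters cannot be set.
 */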
10854 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10855 {
10856         struct net_device *dev = bp->dev;
10857         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10858         struct hwrm_cfa_l2_filter_free_input *req;
10859         struct netdev_hw_addr *ha;
10860         int i, off = 0, rc;
10861         bool uc_update;
10862
10863         netif_addr_lock_bh(dev);
10864         uc_update = bnxt_uc_list_updated(bp);
10865         netif_addr_unlock_bh(dev);
10866
10867         if (!uc_update)
10868                 goto skip_uc;
10869
10870         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10871         if (rc)
10872                 return rc;
10873         hwrm_req_hold(bp, req);
10874         for (i = 1; i < vnic->uc_filter_count; i++) {
10875                 req->l2_filter_id = vnic->fw_l2_filter_id[i];
10876
10877                 rc = hwrm_req_send(bp, req);
10878         }
10879         hwrm_req_drop(bp, req);
10880
10881         vnic->uc_filter_count = 1;
10882
10883         netif_addr_lock_bh(dev);
10884         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10885                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10886         } else {
10887                 netdev_for_each_uc_addr(ha, dev) {
10888                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10889                         off += ETH_ALEN;
10890                         vnic->uc_filter_count++;
10891                 }
10892         }
10893         netif_addr_unlock_bh(dev);
10894
10895         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10896                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10897                 if (rc) {
10898                         if (BNXT_VF(bp) && rc == -ENODEV) {
10899                                 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
10900                                         netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
10901                                 else
10902                                         netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
10903                                 rc = 0;
10904                         } else {
10905                                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10906                         }
10907                         vnic->uc_filter_count = i;
10908                         return rc;
10909                 }
10910         }
10911         if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
10912                 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
10913
10914 skip_uc:
10915         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10916             !bnxt_promisc_ok(bp))
10917                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10918         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10919         if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
10920                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10921                             rc);
10922                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10923                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10924                 vnic->mc_list_count = 0;
10925                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10926         }
10927         if (rc)
10928                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10929                            rc);
10930
10931         return rc;
10932 }
10933
10934 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10935 {
10936 #ifdef CONFIG_BNXT_SRIOV
10937         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10938                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10939
10940                 /* If no minimum rings were provisioned by the PF, don't
10941                  * reserve rings by default when the device is down.
10942                  */
10943                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10944                         return true;
10945
10946                 if (!netif_running(bp->dev))
10947                         return false;
10948         }
10949 #endif
10950         return true;
10951 }
10952
10953 /* If the chip and firmware support RFS */
10954 static bool bnxt_rfs_supported(struct bnxt *bp)
10955 {
10956         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10957                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10958                         return true;
10959                 return false;
10960         }
10961         /* 212 firmware is broken for aRFS */
10962         if (BNXT_FW_MAJ(bp) == 212)
10963                 return false;
10964         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10965                 return true;
10966         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10967                 return true;
10968         return false;
10969 }
10970
10971 /* If runtime conditions support RFS */
10972 static bool bnxt_rfs_capable(struct bnxt *bp)
10973 {
10974 #ifdef CONFIG_RFS_ACCEL
10975         int vnics, max_vnics, max_rss_ctxs;
10976
10977         if (bp->flags & BNXT_FLAG_CHIP_P5)
10978                 return bnxt_rfs_supported(bp);
10979         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10980                 return false;
10981
10982         vnics = 1 + bp->rx_nr_rings;
10983         max_vnics = bnxt_get_max_func_vnics(bp);
10984         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10985
10986         /* RSS contexts not a limiting factor */
10987         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10988                 max_rss_ctxs = max_vnics;
10989         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10990                 if (bp->rx_nr_rings > 1)
10991                         netdev_warn(bp->dev,
10992                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10993                                     min(max_rss_ctxs - 1, max_vnics - 1));
10994                 return false;
10995         }
10996
10997         if (!BNXT_NEW_RM(bp))
10998                 return true;
10999
11000         if (vnics == bp->hw_resc.resv_vnics)
11001                 return true;
11002
11003         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11004         if (vnics <= bp->hw_resc.resv_vnics)
11005                 return true;
11006
11007         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11008         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
11009         return false;
11010 #else
11011         return false;
11012 #endif
11013 }
11014
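/* netdev fix_features hook: clear feature bits that the current configuration
 * cannot support (e.g. NTUPLE without RFS capability, LRO/GRO_HW without
 * aggregation rings) and keep the CTAG and STAG RX VLAN offloads in sync.
 */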
11015 static netdev_features_t bnxt_fix_features(struct net_device *dev,
11016                                            netdev_features_t features)
11017 {
11018         struct bnxt *bp = netdev_priv(dev);
11019         netdev_features_t vlan_features;
11020
11021         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11022                 features &= ~NETIF_F_NTUPLE;
11023
11024         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11025                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11026
11027         if (!(features & NETIF_F_GRO))
11028                 features &= ~NETIF_F_GRO_HW;
11029
11030         if (features & NETIF_F_GRO_HW)
11031                 features &= ~NETIF_F_LRO;
11032
11033         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11034          * turned on or off together.
11035          */
11036         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11037         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11038                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11039                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11040                 else if (vlan_features)
11041                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
11042         }
11043 #ifdef CONFIG_BNXT_SRIOV
11044         if (BNXT_VF(bp) && bp->vf.vlan)
11045                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11046 #endif
11047         return features;
11048 }
11049
11050 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11051 {
11052         struct bnxt *bp = netdev_priv(dev);
11053         u32 flags = bp->flags;
11054         u32 changes;
11055         int rc = 0;
11056         bool re_init = false;
11057         bool update_tpa = false;
11058
11059         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
11060         if (features & NETIF_F_GRO_HW)
11061                 flags |= BNXT_FLAG_GRO;
11062         else if (features & NETIF_F_LRO)
11063                 flags |= BNXT_FLAG_LRO;
11064
11065         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11066                 flags &= ~BNXT_FLAG_TPA;
11067
11068         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11069                 flags |= BNXT_FLAG_STRIP_VLAN;
11070
11071         if (features & NETIF_F_NTUPLE)
11072                 flags |= BNXT_FLAG_RFS;
11073
11074         changes = flags ^ bp->flags;
11075         if (changes & BNXT_FLAG_TPA) {
11076                 update_tpa = true;
11077                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11078                     (flags & BNXT_FLAG_TPA) == 0 ||
11079                     (bp->flags & BNXT_FLAG_CHIP_P5))
11080                         re_init = true;
11081         }
11082
11083         if (changes & ~BNXT_FLAG_TPA)
11084                 re_init = true;
11085
11086         if (flags != bp->flags) {
11087                 u32 old_flags = bp->flags;
11088
11089                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11090                         bp->flags = flags;
11091                         if (update_tpa)
11092                                 bnxt_set_ring_params(bp);
11093                         return rc;
11094                 }
11095
11096                 if (re_init) {
11097                         bnxt_close_nic(bp, false, false);
11098                         bp->flags = flags;
11099                         if (update_tpa)
11100                                 bnxt_set_ring_params(bp);
11101
11102                         return bnxt_open_nic(bp, false, false);
11103                 }
11104                 if (update_tpa) {
11105                         bp->flags = flags;
11106                         rc = bnxt_set_tpa(bp,
11107                                           (flags & BNXT_FLAG_TPA) ?
11108                                           true : false);
11109                         if (rc)
11110                                 bp->flags = old_flags;
11111                 }
11112         }
11113         return rc;
11114 }
11115
11116 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11117                               u8 **nextp)
11118 {
11119         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11120         int hdr_count = 0;
11121         u8 *nexthdr;
11122         int start;
11123
11124         /* Check that there are at most 2 IPv6 extension headers, no
11125          * fragment header, and each is <= 64 bytes.
11126          */
11127         start = nw_off + sizeof(*ip6h);
11128         nexthdr = &ip6h->nexthdr;
11129         while (ipv6_ext_hdr(*nexthdr)) {
11130                 struct ipv6_opt_hdr *hp;
11131                 int hdrlen;
11132
11133                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11134                     *nexthdr == NEXTHDR_FRAGMENT)
11135                         return false;
11136                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11137                                           skb_headlen(skb), NULL);
11138                 if (!hp)
11139                         return false;
11140                 if (*nexthdr == NEXTHDR_AUTH)
11141                         hdrlen = ipv6_authlen(hp);
11142                 else
11143                         hdrlen = ipv6_optlen(hp);
11144
11145                 if (hdrlen > 64)
11146                         return false;
11147                 nexthdr = &hp->nexthdr;
11148                 start += hdrlen;
11149                 hdr_count++;
11150         }
11151         if (nextp) {
11152                 /* Caller will check inner protocol */
11153                 if (skb->encapsulation) {
11154                         *nextp = nexthdr;
11155                         return true;
11156                 }
11157                 *nextp = NULL;
11158         }
11159         /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11160         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11161 }
11162
11163 /* For UDP, we can only handle one VXLAN port and one Geneve port. */
11164 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11165 {
11166         struct udphdr *uh = udp_hdr(skb);
11167         __be16 udp_port = uh->dest;
11168
11169         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11170                 return false;
11171         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11172                 struct ethhdr *eh = inner_eth_hdr(skb);
11173
11174                 switch (eh->h_proto) {
11175                 case htons(ETH_P_IP):
11176                         return true;
11177                 case htons(ETH_P_IPV6):
11178                         return bnxt_exthdr_check(bp, skb,
11179                                                  skb_inner_network_offset(skb),
11180                                                  NULL);
11181                 }
11182         }
11183         return false;
11184 }
11185
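/* Return true if the tunnel type indicated by l4_proto (UDP based VXLAN or
 * Geneve, IPIP, GRE, or IPv6-in-IP) can keep TX offloads for this skb.
 */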
11186 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11187 {
11188         switch (l4_proto) {
11189         case IPPROTO_UDP:
11190                 return bnxt_udp_tunl_check(bp, skb);
11191         case IPPROTO_IPIP:
11192                 return true;
11193         case IPPROTO_GRE: {
11194                 switch (skb->inner_protocol) {
11195                 default:
11196                         return false;
11197                 case htons(ETH_P_IP):
11198                         return true;
11199                 case htons(ETH_P_IPV6):
11200                         fallthrough;
11201                 }
11202         }
11203         case IPPROTO_IPV6:
11204                 /* Check ext headers of inner ipv6 */
11205                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11206                                          NULL);
11207         }
11208         return false;
11209 }
11210
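/* ndo_features_check handler: clear checksum and GSO offload features for
 * packets whose headers the hardware cannot parse, such as unsupported
 * tunnel types or IPv6 packets with too many extension headers.
 */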
11211 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11212                                              struct net_device *dev,
11213                                              netdev_features_t features)
11214 {
11215         struct bnxt *bp = netdev_priv(dev);
11216         u8 *l4_proto;
11217
11218         features = vlan_features_check(skb, features);
11219         switch (vlan_get_protocol(skb)) {
11220         case htons(ETH_P_IP):
11221                 if (!skb->encapsulation)
11222                         return features;
11223                 l4_proto = &ip_hdr(skb)->protocol;
11224                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11225                         return features;
11226                 break;
11227         case htons(ETH_P_IPV6):
11228                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11229                                        &l4_proto))
11230                         break;
11231                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11232                         return features;
11233                 break;
11234         }
11235         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11236 }
11237
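/* Read num_words 32-bit registers starting at reg_off (through the ChiMP
 * register view) using the HWRM_DBG_READ_DIRECT firmware command and return
 * them in reg_buf.
 */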
11238 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11239                          u32 *reg_buf)
11240 {
11241         struct hwrm_dbg_read_direct_output *resp;
11242         struct hwrm_dbg_read_direct_input *req;
11243         __le32 *dbg_reg_buf;
11244         dma_addr_t mapping;
11245         int rc, i;
11246
11247         rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11248         if (rc)
11249                 return rc;
11250
11251         dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11252                                          &mapping);
11253         if (!dbg_reg_buf) {
11254                 rc = -ENOMEM;
11255                 goto dbg_rd_reg_exit;
11256         }
11257
11258         req->host_dest_addr = cpu_to_le64(mapping);
11259
11260         resp = hwrm_req_hold(bp, req);
11261         req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11262         req->read_len32 = cpu_to_le32(num_words);
11263
11264         rc = hwrm_req_send(bp, req);
11265         if (rc || resp->error_code) {
11266                 rc = -EIO;
11267                 goto dbg_rd_reg_exit;
11268         }
11269         for (i = 0; i < num_words; i++)
11270                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11271
11272 dbg_rd_reg_exit:
11273         hwrm_req_drop(bp, req);
11274         return rc;
11275 }
11276
11277 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11278                                        u32 ring_id, u32 *prod, u32 *cons)
11279 {
11280         struct hwrm_dbg_ring_info_get_output *resp;
11281         struct hwrm_dbg_ring_info_get_input *req;
11282         int rc;
11283
11284         rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11285         if (rc)
11286                 return rc;
11287
11288         req->ring_type = ring_type;
11289         req->fw_ring_id = cpu_to_le32(ring_id);
11290         resp = hwrm_req_hold(bp, req);
11291         rc = hwrm_req_send(bp, req);
11292         if (!rc) {
11293                 *prod = le32_to_cpu(resp->producer_index);
11294                 *cons = le32_to_cpu(resp->consumer_index);
11295         }
11296         hwrm_req_drop(bp, req);
11297         return rc;
11298 }
11299
11300 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11301 {
11302         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11303         int i = bnapi->index;
11304
11305         if (!txr)
11306                 return;
11307
11308         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11309                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11310                     txr->tx_cons);
11311 }
11312
11313 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11314 {
11315         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11316         int i = bnapi->index;
11317
11318         if (!rxr)
11319                 return;
11320
11321         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11322                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11323                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11324                     rxr->rx_sw_agg_prod);
11325 }
11326
11327 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11328 {
11329         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11330         int i = bnapi->index;
11331
11332         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11333                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11334 }
11335
11336 static void bnxt_dbg_dump_states(struct bnxt *bp)
11337 {
11338         int i;
11339         struct bnxt_napi *bnapi;
11340
11341         for (i = 0; i < bp->cp_nr_rings; i++) {
11342                 bnapi = bp->bnapi[i];
11343                 if (netif_msg_drv(bp)) {
11344                         bnxt_dump_tx_sw_state(bnapi);
11345                         bnxt_dump_rx_sw_state(bnapi);
11346                         bnxt_dump_cp_sw_state(bnapi);
11347                 }
11348         }
11349 }
11350
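/* Request a firmware reset of the RX ring group backing ring_nr. */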
11351 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11352 {
11353         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11354         struct hwrm_ring_reset_input *req;
11355         struct bnxt_napi *bnapi = rxr->bnapi;
11356         struct bnxt_cp_ring_info *cpr;
11357         u16 cp_ring_id;
11358         int rc;
11359
11360         rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11361         if (rc)
11362                 return rc;
11363
11364         cpr = &bnapi->cp_ring;
11365         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11366         req->cmpl_ring = cpu_to_le16(cp_ring_id);
11367         req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11368         req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11369         return hwrm_req_send_silent(bp, req);
11370 }
11371
11372 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11373 {
11374         if (!silent)
11375                 bnxt_dbg_dump_states(bp);
11376         if (netif_running(bp->dev)) {
11377                 int rc;
11378
11379                 if (silent) {
11380                         bnxt_close_nic(bp, false, false);
11381                         bnxt_open_nic(bp, false, false);
11382                 } else {
11383                         bnxt_ulp_stop(bp);
11384                         bnxt_close_nic(bp, true, false);
11385                         rc = bnxt_open_nic(bp, true, false);
11386                         bnxt_ulp_start(bp, rc);
11387                 }
11388         }
11389 }
11390
11391 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11392 {
11393         struct bnxt *bp = netdev_priv(dev);
11394
11395         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11396         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11397         bnxt_queue_sp_work(bp);
11398 }
11399
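/* Called from the periodic timer: if the firmware heartbeat register stops
 * advancing or the reset counter changes, schedule firmware exception
 * handling from the slow-path task.
 */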
11400 static void bnxt_fw_health_check(struct bnxt *bp)
11401 {
11402         struct bnxt_fw_health *fw_health = bp->fw_health;
11403         u32 val;
11404
11405         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11406                 return;
11407
11408         /* Make sure it is enabled before checking the tmr_counter. */
11409         smp_rmb();
11410         if (fw_health->tmr_counter) {
11411                 fw_health->tmr_counter--;
11412                 return;
11413         }
11414
11415         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11416         if (val == fw_health->last_fw_heartbeat) {
11417                 fw_health->arrests++;
11418                 goto fw_reset;
11419         }
11420
11421         fw_health->last_fw_heartbeat = val;
11422
11423         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11424         if (val != fw_health->last_fw_reset_cnt) {
11425                 fw_health->discoveries++;
11426                 goto fw_reset;
11427         }
11428
11429         fw_health->tmr_counter = fw_health->tmr_multiplier;
11430         return;
11431
11432 fw_reset:
11433         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11434         bnxt_queue_sp_work(bp);
11435 }
11436
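/* Per-device periodic timer.  Kicks the slow-path workqueue for statistics,
 * firmware health, PHY retry and other deferred work, then re-arms itself.
 */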
11437 static void bnxt_timer(struct timer_list *t)
11438 {
11439         struct bnxt *bp = from_timer(bp, t, timer);
11440         struct net_device *dev = bp->dev;
11441
11442         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11443                 return;
11444
11445         if (atomic_read(&bp->intr_sem) != 0)
11446                 goto bnxt_restart_timer;
11447
11448         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11449                 bnxt_fw_health_check(bp);
11450
11451         if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
11452                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11453                 bnxt_queue_sp_work(bp);
11454         }
11455
11456         if (bnxt_tc_flower_enabled(bp)) {
11457                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11458                 bnxt_queue_sp_work(bp);
11459         }
11460
11461 #ifdef CONFIG_RFS_ACCEL
11462         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11463                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11464                 bnxt_queue_sp_work(bp);
11465         }
11466 #endif /*CONFIG_RFS_ACCEL*/
11467
11468         if (bp->link_info.phy_retry) {
11469                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11470                         bp->link_info.phy_retry = false;
11471                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11472                 } else {
11473                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11474                         bnxt_queue_sp_work(bp);
11475                 }
11476         }
11477
11478         if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
11479                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11480                 bnxt_queue_sp_work(bp);
11481         }
11482
11483         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11484             netif_carrier_ok(dev)) {
11485                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11486                 bnxt_queue_sp_work(bp);
11487         }
11488 bnxt_restart_timer:
11489         mod_timer(&bp->timer, jiffies + bp->current_interval);
11490 }
11491
11492 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11493 {
11494         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11495          * set.  If the device is being closed, bnxt_close() may be holding
11496          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
11497          * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
11498          */
11499         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11500         rtnl_lock();
11501 }
11502
11503 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11504 {
11505         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11506         rtnl_unlock();
11507 }
11508
11509 /* Only called from bnxt_sp_task() */
11510 static void bnxt_reset(struct bnxt *bp, bool silent)
11511 {
11512         bnxt_rtnl_lock_sp(bp);
11513         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11514                 bnxt_reset_task(bp, silent);
11515         bnxt_rtnl_unlock_sp(bp);
11516 }
11517
11518 /* Only called from bnxt_sp_task() */
11519 static void bnxt_rx_ring_reset(struct bnxt *bp)
11520 {
11521         int i;
11522
11523         bnxt_rtnl_lock_sp(bp);
11524         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11525                 bnxt_rtnl_unlock_sp(bp);
11526                 return;
11527         }
11528         /* Disable and flush TPA before resetting the RX ring */
11529         if (bp->flags & BNXT_FLAG_TPA)
11530                 bnxt_set_tpa(bp, false);
11531         for (i = 0; i < bp->rx_nr_rings; i++) {
11532                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11533                 struct bnxt_cp_ring_info *cpr;
11534                 int rc;
11535
11536                 if (!rxr->bnapi->in_reset)
11537                         continue;
11538
11539                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11540                 if (rc) {
11541                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11542                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11543                         else
11544                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11545                                             rc);
11546                         bnxt_reset_task(bp, true);
11547                         break;
11548                 }
11549                 bnxt_free_one_rx_ring_skbs(bp, i);
11550                 rxr->rx_prod = 0;
11551                 rxr->rx_agg_prod = 0;
11552                 rxr->rx_sw_agg_prod = 0;
11553                 rxr->rx_next_cons = 0;
11554                 rxr->bnapi->in_reset = false;
11555                 bnxt_alloc_one_rx_ring(bp, i);
11556                 cpr = &rxr->bnapi->cp_ring;
11557                 cpr->sw_stats.rx.rx_resets++;
11558                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11559                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11560                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11561         }
11562         if (bp->flags & BNXT_FLAG_TPA)
11563                 bnxt_set_tpa(bp, true);
11564         bnxt_rtnl_unlock_sp(bp);
11565 }
11566
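/* Quiesce and close the device in preparation for a firmware reset.  If the
 * firmware is in a fatal state, TX, NAPI, interrupts and bus mastering are
 * disabled first to avoid bad DMA into freed memory.
 */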
11567 static void bnxt_fw_reset_close(struct bnxt *bp)
11568 {
11569         bnxt_ulp_stop(bp);
11570         /* When firmware is in fatal state, quiesce device and disable
11571          * bus master to prevent any potential bad DMAs before freeing
11572          * kernel memory.
11573          */
11574         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11575                 u16 val = 0;
11576
11577                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11578                 if (val == 0xffff)
11579                         bp->fw_reset_min_dsecs = 0;
11580                 bnxt_tx_disable(bp);
11581                 bnxt_disable_napi(bp);
11582                 bnxt_disable_int_sync(bp);
11583                 bnxt_free_irq(bp);
11584                 bnxt_clear_int_mode(bp);
11585                 pci_disable_device(bp->pdev);
11586         }
11587         __bnxt_close_nic(bp, true, false);
11588         bnxt_vf_reps_free(bp);
11589         bnxt_clear_int_mode(bp);
11590         bnxt_hwrm_func_drv_unrgtr(bp);
11591         if (pci_is_enabled(bp->pdev))
11592                 pci_disable_device(bp->pdev);
11593         bnxt_free_ctx_mem(bp);
11594         kfree(bp->ctx);
11595         bp->ctx = NULL;
11596 }
11597
11598 static bool is_bnxt_fw_ok(struct bnxt *bp)
11599 {
11600         struct bnxt_fw_health *fw_health = bp->fw_health;
11601         bool no_heartbeat = false, has_reset = false;
11602         u32 val;
11603
11604         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11605         if (val == fw_health->last_fw_heartbeat)
11606                 no_heartbeat = true;
11607
11608         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11609         if (val != fw_health->last_fw_reset_cnt)
11610                 has_reset = true;
11611
11612         if (!no_heartbeat && has_reset)
11613                 return true;
11614
11615         return false;
11616 }
11617
11618 /* rtnl_lock is acquired before calling this function */
11619 static void bnxt_force_fw_reset(struct bnxt *bp)
11620 {
11621         struct bnxt_fw_health *fw_health = bp->fw_health;
11622         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11623         u32 wait_dsecs;
11624
11625         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11626             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11627                 return;
11628
11629         if (ptp) {
11630                 spin_lock_bh(&ptp->ptp_lock);
11631                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11632                 spin_unlock_bh(&ptp->ptp_lock);
11633         } else {
11634                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11635         }
11636         bnxt_fw_reset_close(bp);
11637         wait_dsecs = fw_health->master_func_wait_dsecs;
11638         if (fw_health->primary) {
11639                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11640                         wait_dsecs = 0;
11641                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11642         } else {
11643                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11644                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11645                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11646         }
11647
11648         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11649         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11650         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11651 }
11652
11653 void bnxt_fw_exception(struct bnxt *bp)
11654 {
11655         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11656         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11657         bnxt_rtnl_lock_sp(bp);
11658         bnxt_force_fw_reset(bp);
11659         bnxt_rtnl_unlock_sp(bp);
11660 }
11661
11662 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11663  * < 0 on error.
11664  */
11665 static int bnxt_get_registered_vfs(struct bnxt *bp)
11666 {
11667 #ifdef CONFIG_BNXT_SRIOV
11668         int rc;
11669
11670         if (!BNXT_PF(bp))
11671                 return 0;
11672
11673         rc = bnxt_hwrm_func_qcfg(bp);
11674         if (rc) {
11675                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11676                 return rc;
11677         }
11678         if (bp->pf.registered_vfs)
11679                 return bp->pf.registered_vfs;
11680         if (bp->sriov_cfg)
11681                 return 1;
11682 #endif
11683         return 0;
11684 }
11685
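/* Start a coordinated firmware reset.  With active VFs, wait for them to
 * unregister before closing the device and scheduling the reset work.
 */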
11686 void bnxt_fw_reset(struct bnxt *bp)
11687 {
11688         bnxt_rtnl_lock_sp(bp);
11689         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11690             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11691                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11692                 int n = 0, tmo;
11693
11694                 if (ptp) {
11695                         spin_lock_bh(&ptp->ptp_lock);
11696                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11697                         spin_unlock_bh(&ptp->ptp_lock);
11698                 } else {
11699                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11700                 }
11701                 if (bp->pf.active_vfs &&
11702                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11703                         n = bnxt_get_registered_vfs(bp);
11704                 if (n < 0) {
11705                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11706                                    n);
11707                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11708                         dev_close(bp->dev);
11709                         goto fw_reset_exit;
11710                 } else if (n > 0) {
11711                         u16 vf_tmo_dsecs = n * 10;
11712
11713                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11714                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11715                         bp->fw_reset_state =
11716                                 BNXT_FW_RESET_STATE_POLL_VF;
11717                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11718                         goto fw_reset_exit;
11719                 }
11720                 bnxt_fw_reset_close(bp);
11721                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11722                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11723                         tmo = HZ / 10;
11724                 } else {
11725                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11726                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11727                 }
11728                 bnxt_queue_fw_reset_work(bp, tmo);
11729         }
11730 fw_reset_exit:
11731         bnxt_rtnl_unlock_sp(bp);
11732 }
11733
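/* P5 chips only: look for completion rings that have pending work but whose
 * consumer index has not moved since the last check, and count them as
 * missed IRQs.
 */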
11734 static void bnxt_chk_missed_irq(struct bnxt *bp)
11735 {
11736         int i;
11737
11738         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11739                 return;
11740
11741         for (i = 0; i < bp->cp_nr_rings; i++) {
11742                 struct bnxt_napi *bnapi = bp->bnapi[i];
11743                 struct bnxt_cp_ring_info *cpr;
11744                 u32 fw_ring_id;
11745                 int j;
11746
11747                 if (!bnapi)
11748                         continue;
11749
11750                 cpr = &bnapi->cp_ring;
11751                 for (j = 0; j < 2; j++) {
11752                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11753                         u32 val[2];
11754
11755                         if (!cpr2 || cpr2->has_more_work ||
11756                             !bnxt_has_work(bp, cpr2))
11757                                 continue;
11758
11759                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11760                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11761                                 continue;
11762                         }
11763                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11764                         bnxt_dbg_hwrm_ring_info_get(bp,
11765                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11766                                 fw_ring_id, &val[0], &val[1]);
11767                         cpr->sw_stats.cmn.missed_irqs++;
11768                 }
11769         }
11770 }
11771
11772 static void bnxt_cfg_ntp_filters(struct bnxt *);
11773
11774 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11775 {
11776         struct bnxt_link_info *link_info = &bp->link_info;
11777
11778         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11779                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11780                 if (bp->hwrm_spec_code >= 0x10201) {
11781                         if (link_info->auto_pause_setting &
11782                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11783                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11784                 } else {
11785                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11786                 }
11787                 link_info->advertising = link_info->auto_link_speeds;
11788                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11789         } else {
11790                 link_info->req_link_speed = link_info->force_link_speed;
11791                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11792                 if (link_info->force_pam4_link_speed) {
11793                         link_info->req_link_speed =
11794                                 link_info->force_pam4_link_speed;
11795                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11796                 }
11797                 link_info->req_duplex = link_info->duplex_setting;
11798         }
11799         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11800                 link_info->req_flow_ctrl =
11801                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11802         else
11803                 link_info->req_flow_ctrl = link_info->force_pause_setting;
11804 }
11805
11806 static void bnxt_fw_echo_reply(struct bnxt *bp)
11807 {
11808         struct bnxt_fw_health *fw_health = bp->fw_health;
11809         struct hwrm_func_echo_response_input *req;
11810         int rc;
11811
11812         rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11813         if (rc)
11814                 return;
11815         req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11816         req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11817         hwrm_req_send(bp, req);
11818 }
11819
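/* Slow-path workqueue handler servicing the BNXT_*_SP_EVENT bits set from
 * the timer and interrupt paths.  The reset handlers that temporarily drop
 * BNXT_STATE_IN_SP_TASK must run last.
 */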
11820 static void bnxt_sp_task(struct work_struct *work)
11821 {
11822         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11823
11824         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11825         smp_mb__after_atomic();
11826         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11827                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11828                 return;
11829         }
11830
11831         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11832                 bnxt_cfg_rx_mode(bp);
11833
11834         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11835                 bnxt_cfg_ntp_filters(bp);
11836         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11837                 bnxt_hwrm_exec_fwd_req(bp);
11838         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11839                 bnxt_hwrm_port_qstats(bp, 0);
11840                 bnxt_hwrm_port_qstats_ext(bp, 0);
11841                 bnxt_accumulate_all_stats(bp);
11842         }
11843
11844         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11845                 int rc;
11846
11847                 mutex_lock(&bp->link_lock);
11848                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11849                                        &bp->sp_event))
11850                         bnxt_hwrm_phy_qcaps(bp);
11851
11852                 rc = bnxt_update_link(bp, true);
11853                 if (rc)
11854                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11855                                    rc);
11856
11857                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11858                                        &bp->sp_event))
11859                         bnxt_init_ethtool_link_settings(bp);
11860                 mutex_unlock(&bp->link_lock);
11861         }
11862         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11863                 int rc;
11864
11865                 mutex_lock(&bp->link_lock);
11866                 rc = bnxt_update_phy_setting(bp);
11867                 mutex_unlock(&bp->link_lock);
11868                 if (rc) {
11869                         netdev_warn(bp->dev, "update phy settings retry failed\n");
11870                 } else {
11871                         bp->link_info.phy_retry = false;
11872                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
11873                 }
11874         }
11875         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11876                 mutex_lock(&bp->link_lock);
11877                 bnxt_get_port_module_status(bp);
11878                 mutex_unlock(&bp->link_lock);
11879         }
11880
11881         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11882                 bnxt_tc_flow_stats_work(bp);
11883
11884         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11885                 bnxt_chk_missed_irq(bp);
11886
11887         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11888                 bnxt_fw_echo_reply(bp);
11889
11890         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
11891          * must be the last functions to be called before exiting.
11892          */
11893         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11894                 bnxt_reset(bp, false);
11895
11896         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11897                 bnxt_reset(bp, true);
11898
11899         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11900                 bnxt_rx_ring_reset(bp);
11901
11902         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
11903                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
11904                     test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
11905                         bnxt_devlink_health_fw_report(bp);
11906                 else
11907                         bnxt_fw_reset(bp);
11908         }
11909
11910         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11911                 if (!is_bnxt_fw_ok(bp))
11912                         bnxt_devlink_health_fw_report(bp);
11913         }
11914
11915         smp_mb__before_atomic();
11916         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11917 }
11918
11919 /* Under rtnl_lock */
11920 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11921                      int tx_xdp)
11922 {
11923         int max_rx, max_tx, tx_sets = 1;
11924         int tx_rings_needed, stats;
11925         int rx_rings = rx;
11926         int cp, vnics, rc;
11927
11928         if (tcs)
11929                 tx_sets = tcs;
11930
11931         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11932         if (rc)
11933                 return rc;
11934
11935         if (max_rx < rx)
11936                 return -ENOMEM;
11937
11938         tx_rings_needed = tx * tx_sets + tx_xdp;
11939         if (max_tx < tx_rings_needed)
11940                 return -ENOMEM;
11941
11942         vnics = 1;
11943         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11944                 vnics += rx_rings;
11945
11946         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11947                 rx_rings <<= 1;
11948         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11949         stats = cp;
11950         if (BNXT_NEW_RM(bp)) {
11951                 cp += bnxt_get_ulp_msix_num(bp);
11952                 stats += bnxt_get_ulp_stat_ctxs(bp);
11953         }
11954         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11955                                      stats, vnics);
11956 }
11957
11958 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11959 {
11960         if (bp->bar2) {
11961                 pci_iounmap(pdev, bp->bar2);
11962                 bp->bar2 = NULL;
11963         }
11964
11965         if (bp->bar1) {
11966                 pci_iounmap(pdev, bp->bar1);
11967                 bp->bar1 = NULL;
11968         }
11969
11970         if (bp->bar0) {
11971                 pci_iounmap(pdev, bp->bar0);
11972                 bp->bar0 = NULL;
11973         }
11974 }
11975
11976 static void bnxt_cleanup_pci(struct bnxt *bp)
11977 {
11978         bnxt_unmap_bars(bp, bp->pdev);
11979         pci_release_regions(bp->pdev);
11980         if (pci_is_enabled(bp->pdev))
11981                 pci_disable_device(bp->pdev);
11982 }
11983
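/* Set the default interrupt coalescing parameters for the RX and TX rings. */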
11984 static void bnxt_init_dflt_coal(struct bnxt *bp)
11985 {
11986         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
11987         struct bnxt_coal *coal;
11988         u16 flags = 0;
11989
11990         if (coal_cap->cmpl_params &
11991             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
11992                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
11993
11994         /* Tick values in microseconds.
11995          * 1 coal_buf x bufs_per_record = 1 completion record.
11996          */
11997         coal = &bp->rx_coal;
11998         coal->coal_ticks = 10;
11999         coal->coal_bufs = 30;
12000         coal->coal_ticks_irq = 1;
12001         coal->coal_bufs_irq = 2;
12002         coal->idle_thresh = 50;
12003         coal->bufs_per_record = 2;
12004         coal->budget = 64;              /* NAPI budget */
12005         coal->flags = flags;
12006
12007         coal = &bp->tx_coal;
12008         coal->coal_ticks = 28;
12009         coal->coal_bufs = 30;
12010         coal->coal_ticks_irq = 2;
12011         coal->coal_bufs_irq = 2;
12012         coal->bufs_per_record = 1;
12013         coal->flags = flags;
12014
12015         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12016 }
12017
12018 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12019 {
12020         int rc;
12021
12022         bp->fw_cap = 0;
12023         rc = bnxt_hwrm_ver_get(bp);
12024         bnxt_try_map_fw_health_reg(bp);
12025         if (rc) {
12026                 rc = bnxt_try_recover_fw(bp);
12027                 if (rc)
12028                         return rc;
12029                 rc = bnxt_hwrm_ver_get(bp);
12030                 if (rc)
12031                         return rc;
12032         }
12033
12034         bnxt_nvm_cfg_ver_get(bp);
12035
12036         rc = bnxt_hwrm_func_reset(bp);
12037         if (rc)
12038                 return -ENODEV;
12039
12040         bnxt_hwrm_fw_set_time(bp);
12041         return 0;
12042 }
12043
12044 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12045 {
12046         int rc;
12047
12048         /* Get the MAX capabilities for this function */
12049         rc = bnxt_hwrm_func_qcaps(bp);
12050         if (rc) {
12051                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12052                            rc);
12053                 return -ENODEV;
12054         }
12055
12056         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12057         if (rc)
12058                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12059                             rc);
12060
12061         if (bnxt_alloc_fw_health(bp)) {
12062                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12063         } else {
12064                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12065                 if (rc)
12066                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12067                                     rc);
12068         }
12069
12070         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12071         if (rc)
12072                 return -ENODEV;
12073
12074         bnxt_hwrm_func_qcfg(bp);
12075         bnxt_hwrm_vnic_qcaps(bp);
12076         bnxt_hwrm_port_led_qcaps(bp);
12077         bnxt_ethtool_init(bp);
12078         bnxt_dcb_init(bp);
12079         return 0;
12080 }
12081
12082 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12083 {
12084         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12085         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12086                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12087                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12088                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
12089         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12090                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12091                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12092                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12093         }
12094 }
12095
12096 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12097 {
12098         struct net_device *dev = bp->dev;
12099
12100         dev->hw_features &= ~NETIF_F_NTUPLE;
12101         dev->features &= ~NETIF_F_NTUPLE;
12102         bp->flags &= ~BNXT_FLAG_RFS;
12103         if (bnxt_rfs_supported(bp)) {
12104                 dev->hw_features |= NETIF_F_NTUPLE;
12105                 if (bnxt_rfs_capable(bp)) {
12106                         bp->flags |= BNXT_FLAG_RFS;
12107                         dev->features |= NETIF_F_NTUPLE;
12108                 }
12109         }
12110 }
12111
12112 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12113 {
12114         struct pci_dev *pdev = bp->pdev;
12115
12116         bnxt_set_dflt_rss_hash_type(bp);
12117         bnxt_set_dflt_rfs(bp);
12118
12119         bnxt_get_wol_settings(bp);
12120         if (bp->flags & BNXT_FLAG_WOL_CAP)
12121                 device_set_wakeup_enable(&pdev->dev, bp->wol);
12122         else
12123                 device_set_wakeup_capable(&pdev->dev, false);
12124
12125         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12126         bnxt_hwrm_coal_params_qcaps(bp);
12127 }
12128
12129 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12130
12131 int bnxt_fw_init_one(struct bnxt *bp)
12132 {
12133         int rc;
12134
12135         rc = bnxt_fw_init_one_p1(bp);
12136         if (rc) {
12137                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12138                 return rc;
12139         }
12140         rc = bnxt_fw_init_one_p2(bp);
12141         if (rc) {
12142                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12143                 return rc;
12144         }
12145         rc = bnxt_probe_phy(bp, false);
12146         if (rc)
12147                 return rc;
12148         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12149         if (rc)
12150                 return rc;
12151
12152         bnxt_fw_init_one_p3(bp);
12153         return 0;
12154 }
12155
12156 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12157 {
12158         struct bnxt_fw_health *fw_health = bp->fw_health;
12159         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12160         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12161         u32 reg_type, reg_off, delay_msecs;
12162
12163         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12164         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12165         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12166         switch (reg_type) {
12167         case BNXT_FW_HEALTH_REG_TYPE_CFG:
12168                 pci_write_config_dword(bp->pdev, reg_off, val);
12169                 break;
12170         case BNXT_FW_HEALTH_REG_TYPE_GRC:
12171                 writel(reg_off & BNXT_GRC_BASE_MASK,
12172                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12173                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12174                 fallthrough;
12175         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12176                 writel(val, bp->bar0 + reg_off);
12177                 break;
12178         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12179                 writel(val, bp->bar1 + reg_off);
12180                 break;
12181         }
12182         if (delay_msecs) {
12183                 pci_read_config_dword(bp->pdev, 0, &val);
12184                 msleep(delay_msecs);
12185         }
12186 }
12187
12188 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12189 {
12190         struct hwrm_func_qcfg_output *resp;
12191         struct hwrm_func_qcfg_input *req;
12192         bool result = true; /* firmware will enforce if unknown */
12193
12194         if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12195                 return result;
12196
12197         if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12198                 return result;
12199
12200         req->fid = cpu_to_le16(0xffff);
12201         resp = hwrm_req_hold(bp, req);
12202         if (!hwrm_req_send(bp, req))
12203                 result = !!(le16_to_cpu(resp->flags) &
12204                             FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12205         hwrm_req_drop(bp, req);
12206         return result;
12207 }
12208
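/* Reset the firmware using whichever mechanism it advertises: OP-TEE based
 * reset, the host register write sequence, or an HWRM_FW_RESET command to
 * the co-processor.
 */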
12209 static void bnxt_reset_all(struct bnxt *bp)
12210 {
12211         struct bnxt_fw_health *fw_health = bp->fw_health;
12212         int i, rc;
12213
12214         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12215                 bnxt_fw_reset_via_optee(bp);
12216                 bp->fw_reset_timestamp = jiffies;
12217                 return;
12218         }
12219
12220         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12221                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12222                         bnxt_fw_reset_writel(bp, i);
12223         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12224                 struct hwrm_fw_reset_input *req;
12225
12226                 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12227                 if (!rc) {
12228                         req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12229                         req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12230                         req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12231                         req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12232                         rc = hwrm_req_send(bp, req);
12233                 }
12234                 if (rc != -ENODEV)
12235                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12236         }
12237         bp->fw_reset_timestamp = jiffies;
12238 }
12239
12240 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12241 {
12242         return time_after(jiffies, bp->fw_reset_timestamp +
12243                           (bp->fw_reset_max_dsecs * HZ / 10));
12244 }
12245
12246 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12247 {
12248         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12249         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12250                 bnxt_ulp_start(bp, rc);
12251                 bnxt_dl_health_fw_status_update(bp, false);
12252         }
12253         bp->fw_reset_state = 0;
12254         dev_close(bp->dev);
12255 }
12256
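/* Delayed-work state machine that drives a firmware reset: poll VFs, reset
 * the firmware, re-enable the PCI device, poll for firmware readiness and
 * finally re-open the netdev.
 */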
12257 static void bnxt_fw_reset_task(struct work_struct *work)
12258 {
12259         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12260         int rc = 0;
12261
12262         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12263                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12264                 return;
12265         }
12266
12267         switch (bp->fw_reset_state) {
12268         case BNXT_FW_RESET_STATE_POLL_VF: {
12269                 int n = bnxt_get_registered_vfs(bp);
12270                 int tmo;
12271
12272                 if (n < 0) {
12273                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12274                                    n, jiffies_to_msecs(jiffies -
12275                                    bp->fw_reset_timestamp));
12276                         goto fw_reset_abort;
12277                 } else if (n > 0) {
12278                         if (bnxt_fw_reset_timeout(bp)) {
12279                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12280                                 bp->fw_reset_state = 0;
12281                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12282                                            n);
12283                                 return;
12284                         }
12285                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12286                         return;
12287                 }
12288                 bp->fw_reset_timestamp = jiffies;
12289                 rtnl_lock();
12290                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12291                         bnxt_fw_reset_abort(bp, rc);
12292                         rtnl_unlock();
12293                         return;
12294                 }
12295                 bnxt_fw_reset_close(bp);
12296                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12297                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12298                         tmo = HZ / 10;
12299                 } else {
12300                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12301                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12302                 }
12303                 rtnl_unlock();
12304                 bnxt_queue_fw_reset_work(bp, tmo);
12305                 return;
12306         }
12307         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12308                 u32 val;
12309
12310                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12311                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12312                     !bnxt_fw_reset_timeout(bp)) {
12313                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12314                         return;
12315                 }
12316
12317                 if (!bp->fw_health->primary) {
12318                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12319
12320                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12321                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12322                         return;
12323                 }
12324                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12325         }
12326                 fallthrough;
12327         case BNXT_FW_RESET_STATE_RESET_FW:
12328                 bnxt_reset_all(bp);
12329                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12330                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12331                 return;
12332         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12333                 bnxt_inv_fw_health_reg(bp);
12334                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12335                     !bp->fw_reset_min_dsecs) {
12336                         u16 val;
12337
12338                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12339                         if (val == 0xffff) {
12340                                 if (bnxt_fw_reset_timeout(bp)) {
12341                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12342                                         rc = -ETIMEDOUT;
12343                                         goto fw_reset_abort;
12344                                 }
12345                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12346                                 return;
12347                         }
12348                 }
12349                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12350                 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12351                 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12352                     !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12353                         bnxt_dl_remote_reload(bp);
12354                 if (pci_enable_device(bp->pdev)) {
12355                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12356                         rc = -ENODEV;
12357                         goto fw_reset_abort;
12358                 }
12359                 pci_set_master(bp->pdev);
12360                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12361                 fallthrough;
12362         case BNXT_FW_RESET_STATE_POLL_FW:
12363                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12364                 rc = bnxt_hwrm_poll(bp);
12365                 if (rc) {
12366                         if (bnxt_fw_reset_timeout(bp)) {
12367                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12368                                 goto fw_reset_abort_status;
12369                         }
12370                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12371                         return;
12372                 }
12373                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12374                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12375                 fallthrough;
12376         case BNXT_FW_RESET_STATE_OPENING:
12377                 while (!rtnl_trylock()) {
12378                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12379                         return;
12380                 }
12381                 rc = bnxt_open(bp->dev);
12382                 if (rc) {
12383                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12384                         bnxt_fw_reset_abort(bp, rc);
12385                         rtnl_unlock();
12386                         return;
12387                 }
12388
12389                 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12390                     bp->fw_health->enabled) {
12391                         bp->fw_health->last_fw_reset_cnt =
12392                                 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12393                 }
12394                 bp->fw_reset_state = 0;
12395                 /* Make sure fw_reset_state is 0 before clearing the flag */
12396                 smp_mb__before_atomic();
12397                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12398                 bnxt_ulp_start(bp, 0);
12399                 bnxt_reenable_sriov(bp);
12400                 bnxt_vf_reps_alloc(bp);
12401                 bnxt_vf_reps_open(bp);
12402                 bnxt_ptp_reapply_pps(bp);
12403                 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12404                 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12405                         bnxt_dl_health_fw_recovery_done(bp);
12406                         bnxt_dl_health_fw_status_update(bp, true);
12407                 }
12408                 rtnl_unlock();
12409                 break;
12410         }
12411         return;
12412
12413 fw_reset_abort_status:
12414         if (bp->fw_health->status_reliable ||
12415             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12416                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12417
12418                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12419         }
12420 fw_reset_abort:
12421         rtnl_lock();
12422         bnxt_fw_reset_abort(bp, rc);
12423         rtnl_unlock();
12424 }
12425
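/* One-time PCI setup at probe time: enable the device, request regions, set
 * the DMA mask, map BAR0 and BAR4, and initialize locks, timers and work
 * items.
 */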
12426 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12427 {
12428         int rc;
12429         struct bnxt *bp = netdev_priv(dev);
12430
12431         SET_NETDEV_DEV(dev, &pdev->dev);
12432
12433         /* enable device (incl. PCI PM wakeup), and bus-mastering */
12434         rc = pci_enable_device(pdev);
12435         if (rc) {
12436                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12437                 goto init_err;
12438         }
12439
12440         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12441                 dev_err(&pdev->dev,
12442                         "Cannot find PCI device base address, aborting\n");
12443                 rc = -ENODEV;
12444                 goto init_err_disable;
12445         }
12446
12447         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12448         if (rc) {
12449                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12450                 goto init_err_disable;
12451         }
12452
12453         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12454             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12455                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12456                 rc = -EIO;
12457                 goto init_err_release;
12458         }
12459
12460         pci_set_master(pdev);
12461
12462         bp->dev = dev;
12463         bp->pdev = pdev;
12464
12465         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12466          * determines the BAR size.
12467          */
12468         bp->bar0 = pci_ioremap_bar(pdev, 0);
12469         if (!bp->bar0) {
12470                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12471                 rc = -ENOMEM;
12472                 goto init_err_release;
12473         }
12474
12475         bp->bar2 = pci_ioremap_bar(pdev, 4);
12476         if (!bp->bar2) {
12477                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12478                 rc = -ENOMEM;
12479                 goto init_err_release;
12480         }
12481
12482         pci_enable_pcie_error_reporting(pdev);
12483
12484         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12485         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12486
12487         spin_lock_init(&bp->ntp_fltr_lock);
12488 #if BITS_PER_LONG == 32
12489         spin_lock_init(&bp->db_lock);
12490 #endif
12491
12492         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12493         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12494
12495         timer_setup(&bp->timer, bnxt_timer, 0);
12496         bp->current_interval = BNXT_TIMER_INTERVAL;
12497
12498         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12499         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12500
12501         clear_bit(BNXT_STATE_OPEN, &bp->state);
12502         return 0;
12503
12504 init_err_release:
12505         bnxt_unmap_bars(bp, pdev);
12506         pci_release_regions(pdev);
12507
12508 init_err_disable:
12509         pci_disable_device(pdev);
12510
12511 init_err:
12512         return rc;
12513 }
12514
12515 /* rtnl_lock held */
12516 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12517 {
12518         struct sockaddr *addr = p;
12519         struct bnxt *bp = netdev_priv(dev);
12520         int rc = 0;
12521
12522         if (!is_valid_ether_addr(addr->sa_data))
12523                 return -EADDRNOTAVAIL;
12524
12525         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12526                 return 0;
12527
12528         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12529         if (rc)
12530                 return rc;
12531
12532         eth_hw_addr_set(dev, addr->sa_data);
12533         if (netif_running(dev)) {
12534                 bnxt_close_nic(bp, false, false);
12535                 rc = bnxt_open_nic(bp, false, false);
12536         }
12537
12538         return rc;
12539 }
12540
12541 /* rtnl_lock held */
12542 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12543 {
12544         struct bnxt *bp = netdev_priv(dev);
12545
12546         if (netif_running(dev))
12547                 bnxt_close_nic(bp, true, false);
12548
12549         dev->mtu = new_mtu;
12550         bnxt_set_ring_params(bp);
12551
12552         if (netif_running(dev))
12553                 return bnxt_open_nic(bp, true, false);
12554
12555         return 0;
12556 }
12557
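/* Configure the number of TX traffic classes.  Validates the request
 * against bp->max_tc and the available rings, then closes and reopens
 * the NIC (if running) to re-allocate resources for the new TC count.
 */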
12558 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12559 {
12560         struct bnxt *bp = netdev_priv(dev);
12561         bool sh = false;
12562         int rc;
12563
12564         if (tc > bp->max_tc) {
12565                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12566                            tc, bp->max_tc);
12567                 return -EINVAL;
12568         }
12569
12570         if (netdev_get_num_tc(dev) == tc)
12571                 return 0;
12572
12573         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12574                 sh = true;
12575
12576         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12577                               sh, tc, bp->tx_nr_rings_xdp);
12578         if (rc)
12579                 return rc;
12580
12581                 /* Need to close the device and do hw resource re-allocations */
12582         if (netif_running(bp->dev))
12583                 bnxt_close_nic(bp, true, false);
12584
12585         if (tc) {
12586                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12587                 netdev_set_num_tc(dev, tc);
12588         } else {
12589                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12590                 netdev_reset_tc(dev);
12591         }
12592         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12593         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12594                                bp->tx_nr_rings + bp->rx_nr_rings;
12595
12596         if (netif_running(bp->dev))
12597                 return bnxt_open_nic(bp, true, false);
12598
12599         return 0;
12600 }
12601
12602 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12603                                   void *cb_priv)
12604 {
12605         struct bnxt *bp = cb_priv;
12606
12607         if (!bnxt_tc_flower_enabled(bp) ||
12608             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12609                 return -EOPNOTSUPP;
12610
12611         switch (type) {
12612         case TC_SETUP_CLSFLOWER:
12613                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12614         default:
12615                 return -EOPNOTSUPP;
12616         }
12617 }
12618
12619 LIST_HEAD(bnxt_block_cb_list);
12620
12621 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12622                          void *type_data)
12623 {
12624         struct bnxt *bp = netdev_priv(dev);
12625
12626         switch (type) {
12627         case TC_SETUP_BLOCK:
12628                 return flow_block_cb_setup_simple(type_data,
12629                                                   &bnxt_block_cb_list,
12630                                                   bnxt_setup_tc_block_cb,
12631                                                   bp, bp, true);
12632         case TC_SETUP_QDISC_MQPRIO: {
12633                 struct tc_mqprio_qopt *mqprio = type_data;
12634
12635                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12636
12637                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12638         }
12639         default:
12640                 return -EOPNOTSUPP;
12641         }
12642 }
12643
12644 #ifdef CONFIG_RFS_ACCEL
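/* Return true if two ntuple filters match on protocol, IP addresses,
 * ports, flow dissector control flags and src/dst MAC addresses.
 */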
12645 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12646                             struct bnxt_ntuple_filter *f2)
12647 {
12648         struct flow_keys *keys1 = &f1->fkeys;
12649         struct flow_keys *keys2 = &f2->fkeys;
12650
12651         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12652             keys1->basic.ip_proto != keys2->basic.ip_proto)
12653                 return false;
12654
12655         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12656                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12657                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12658                         return false;
12659         } else {
12660                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12661                            sizeof(keys1->addrs.v6addrs.src)) ||
12662                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12663                            sizeof(keys1->addrs.v6addrs.dst)))
12664                         return false;
12665         }
12666
12667         if (keys1->ports.ports == keys2->ports.ports &&
12668             keys1->control.flags == keys2->control.flags &&
12669             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12670             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12671                 return true;
12672
12673         return false;
12674 }
12675
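/* aRFS callback (ndo_rx_flow_steer): build an ntuple filter from the flow
 * keys of @skb and queue it for insertion so that the flow is steered to
 * @rxq_index.  Returns the new filter's sw_id, 0 if an identical filter
 * already exists, or a negative error code.
 */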
12676 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12677                               u16 rxq_index, u32 flow_id)
12678 {
12679         struct bnxt *bp = netdev_priv(dev);
12680         struct bnxt_ntuple_filter *fltr, *new_fltr;
12681         struct flow_keys *fkeys;
12682         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12683         int rc = 0, idx, bit_id, l2_idx = 0;
12684         struct hlist_head *head;
12685         u32 flags;
12686
12687         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12688                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12689                 int off = 0, j;
12690
12691                 netif_addr_lock_bh(dev);
12692                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12693                         if (ether_addr_equal(eth->h_dest,
12694                                              vnic->uc_list + off)) {
12695                                 l2_idx = j + 1;
12696                                 break;
12697                         }
12698                 }
12699                 netif_addr_unlock_bh(dev);
12700                 if (!l2_idx)
12701                         return -EINVAL;
12702         }
12703         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12704         if (!new_fltr)
12705                 return -ENOMEM;
12706
12707         fkeys = &new_fltr->fkeys;
12708         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12709                 rc = -EPROTONOSUPPORT;
12710                 goto err_free;
12711         }
12712
12713         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12714              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12715             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12716              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12717                 rc = -EPROTONOSUPPORT;
12718                 goto err_free;
12719         }
12720         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12721             bp->hwrm_spec_code < 0x10601) {
12722                 rc = -EPROTONOSUPPORT;
12723                 goto err_free;
12724         }
12725         flags = fkeys->control.flags;
12726         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12727              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12728                 rc = -EPROTONOSUPPORT;
12729                 goto err_free;
12730         }
12731
12732         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12733         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12734
12735         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12736         head = &bp->ntp_fltr_hash_tbl[idx];
12737         rcu_read_lock();
12738         hlist_for_each_entry_rcu(fltr, head, hash) {
12739                 if (bnxt_fltr_match(fltr, new_fltr)) {
12740                         rcu_read_unlock();
12741                         rc = 0;
12742                         goto err_free;
12743                 }
12744         }
12745         rcu_read_unlock();
12746
12747         spin_lock_bh(&bp->ntp_fltr_lock);
12748         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12749                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12750         if (bit_id < 0) {
12751                 spin_unlock_bh(&bp->ntp_fltr_lock);
12752                 rc = -ENOMEM;
12753                 goto err_free;
12754         }
12755
12756         new_fltr->sw_id = (u16)bit_id;
12757         new_fltr->flow_id = flow_id;
12758         new_fltr->l2_fltr_idx = l2_idx;
12759         new_fltr->rxq = rxq_index;
12760         hlist_add_head_rcu(&new_fltr->hash, head);
12761         bp->ntp_fltr_count++;
12762         spin_unlock_bh(&bp->ntp_fltr_lock);
12763
12764         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12765         bnxt_queue_sp_work(bp);
12766
12767         return new_fltr->sw_id;
12768
12769 err_free:
12770         kfree(new_fltr);
12771         return rc;
12772 }
12773
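/* Walk the ntuple filter hash table: program filters not yet installed in
 * hardware and free filters whose flows have expired according to
 * rps_may_expire_flow().
 */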
12774 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12775 {
12776         int i;
12777
12778         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12779                 struct hlist_head *head;
12780                 struct hlist_node *tmp;
12781                 struct bnxt_ntuple_filter *fltr;
12782                 int rc;
12783
12784                 head = &bp->ntp_fltr_hash_tbl[i];
12785                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12786                         bool del = false;
12787
12788                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12789                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12790                                                         fltr->flow_id,
12791                                                         fltr->sw_id)) {
12792                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
12793                                                                          fltr);
12794                                         del = true;
12795                                 }
12796                         } else {
12797                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12798                                                                        fltr);
12799                                 if (rc)
12800                                         del = true;
12801                                 else
12802                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
12803                         }
12804
12805                         if (del) {
12806                                 spin_lock_bh(&bp->ntp_fltr_lock);
12807                                 hlist_del_rcu(&fltr->hash);
12808                                 bp->ntp_fltr_count--;
12809                                 spin_unlock_bh(&bp->ntp_fltr_lock);
12810                                 synchronize_rcu();
12811                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12812                                 kfree(fltr);
12813                         }
12814                 }
12815         }
12816         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12817                 netdev_info(bp->dev, "Received PF driver unload event!\n");
12818 }
12819
12820 #else
12821
12822 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12823 {
12824 }
12825
12826 #endif /* CONFIG_RFS_ACCEL */
12827
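/* udp_tunnel_nic sync callback: allocate or free the firmware VXLAN or
 * GENEVE destination port for the given table entry.
 */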
12828 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12829 {
12830         struct bnxt *bp = netdev_priv(netdev);
12831         struct udp_tunnel_info ti;
12832         unsigned int cmd;
12833
12834         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12835         if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
12836                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12837         else
12838                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12839
12840         if (ti.port)
12841                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12842
12843         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12844 }
12845
12846 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12847         .sync_table     = bnxt_udp_tunnel_sync,
12848         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12849                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12850         .tables         = {
12851                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
12852                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12853         },
12854 };
12855
12856 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12857                                struct net_device *dev, u32 filter_mask,
12858                                int nlflags)
12859 {
12860         struct bnxt *bp = netdev_priv(dev);
12861
12862         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12863                                        nlflags, filter_mask, NULL);
12864 }
12865
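/* ndo_bridge_setlink: apply a new bridge mode from the IFLA_BRIDGE_MODE
 * attribute if it differs from the current setting.
 */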
12866 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12867                                u16 flags, struct netlink_ext_ack *extack)
12868 {
12869         struct bnxt *bp = netdev_priv(dev);
12870         struct nlattr *attr, *br_spec;
12871         int rem, rc = 0;
12872
12873         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12874                 return -EOPNOTSUPP;
12875
12876         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12877         if (!br_spec)
12878                 return -EINVAL;
12879
12880         nla_for_each_nested(attr, br_spec, rem) {
12881                 u16 mode;
12882
12883                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12884                         continue;
12885
12886                 if (nla_len(attr) < sizeof(mode))
12887                         return -EINVAL;
12888
12889                 mode = nla_get_u16(attr);
12890                 if (mode == bp->br_mode)
12891                         break;
12892
12893                 rc = bnxt_hwrm_set_br_mode(bp, mode);
12894                 if (!rc)
12895                         bp->br_mode = mode;
12896                 break;
12897         }
12898         return rc;
12899 }
12900
12901 int bnxt_get_port_parent_id(struct net_device *dev,
12902                             struct netdev_phys_item_id *ppid)
12903 {
12904         struct bnxt *bp = netdev_priv(dev);
12905
12906         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12907                 return -EOPNOTSUPP;
12908
12909         /* The PF and its VF-reps only support the switchdev framework */
12910         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12911                 return -EOPNOTSUPP;
12912
12913         ppid->id_len = sizeof(bp->dsn);
12914         memcpy(ppid->id, bp->dsn, ppid->id_len);
12915
12916         return 0;
12917 }
12918
12919 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12920 {
12921         struct bnxt *bp = netdev_priv(dev);
12922
12923         return &bp->dl_port;
12924 }
12925
12926 static const struct net_device_ops bnxt_netdev_ops = {
12927         .ndo_open               = bnxt_open,
12928         .ndo_start_xmit         = bnxt_start_xmit,
12929         .ndo_stop               = bnxt_close,
12930         .ndo_get_stats64        = bnxt_get_stats64,
12931         .ndo_set_rx_mode        = bnxt_set_rx_mode,
12932         .ndo_eth_ioctl          = bnxt_ioctl,
12933         .ndo_validate_addr      = eth_validate_addr,
12934         .ndo_set_mac_address    = bnxt_change_mac_addr,
12935         .ndo_change_mtu         = bnxt_change_mtu,
12936         .ndo_fix_features       = bnxt_fix_features,
12937         .ndo_set_features       = bnxt_set_features,
12938         .ndo_features_check     = bnxt_features_check,
12939         .ndo_tx_timeout         = bnxt_tx_timeout,
12940 #ifdef CONFIG_BNXT_SRIOV
12941         .ndo_get_vf_config      = bnxt_get_vf_config,
12942         .ndo_set_vf_mac         = bnxt_set_vf_mac,
12943         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
12944         .ndo_set_vf_rate        = bnxt_set_vf_bw,
12945         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
12946         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
12947         .ndo_set_vf_trust       = bnxt_set_vf_trust,
12948 #endif
12949         .ndo_setup_tc           = bnxt_setup_tc,
12950 #ifdef CONFIG_RFS_ACCEL
12951         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
12952 #endif
12953         .ndo_bpf                = bnxt_xdp,
12954         .ndo_xdp_xmit           = bnxt_xdp_xmit,
12955         .ndo_bridge_getlink     = bnxt_bridge_getlink,
12956         .ndo_bridge_setlink     = bnxt_bridge_setlink,
12957         .ndo_get_devlink_port   = bnxt_get_devlink_port,
12958 };
12959
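/* PCI remove handler: disable SR-IOV, unregister the netdev, cancel
 * pending work, and free all remaining driver resources.
 */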
12960 static void bnxt_remove_one(struct pci_dev *pdev)
12961 {
12962         struct net_device *dev = pci_get_drvdata(pdev);
12963         struct bnxt *bp = netdev_priv(dev);
12964
12965         if (BNXT_PF(bp))
12966                 bnxt_sriov_disable(bp);
12967
12968         if (BNXT_PF(bp))
12969                 devlink_port_type_clear(&bp->dl_port);
12970
12971         bnxt_ptp_clear(bp);
12972         pci_disable_pcie_error_reporting(pdev);
12973         unregister_netdev(dev);
12974         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12975         /* Flush any pending tasks */
12976         cancel_work_sync(&bp->sp_task);
12977         cancel_delayed_work_sync(&bp->fw_reset_task);
12978         bp->sp_event = 0;
12979
12980         bnxt_dl_fw_reporters_destroy(bp);
12981         bnxt_dl_unregister(bp);
12982         bnxt_shutdown_tc(bp);
12983
12984         bnxt_clear_int_mode(bp);
12985         bnxt_hwrm_func_drv_unrgtr(bp);
12986         bnxt_free_hwrm_resources(bp);
12987         bnxt_ethtool_free(bp);
12988         bnxt_dcb_free(bp);
12989         kfree(bp->edev);
12990         bp->edev = NULL;
12991         kfree(bp->ptp_cfg);
12992         bp->ptp_cfg = NULL;
12993         kfree(bp->fw_health);
12994         bp->fw_health = NULL;
12995         bnxt_cleanup_pci(bp);
12996         bnxt_free_ctx_mem(bp);
12997         kfree(bp->ctx);
12998         bp->ctx = NULL;
12999         kfree(bp->rss_indir_tbl);
13000         bp->rss_indir_tbl = NULL;
13001         bnxt_free_port_stats(bp);
13002         free_netdev(dev);
13003 }
13004
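/* Query PHY capabilities from firmware and, when @fw_dflt is set, read the
 * current link state to seed the initial ethtool link settings.
 */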
13005 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13006 {
13007         int rc = 0;
13008         struct bnxt_link_info *link_info = &bp->link_info;
13009
13010         bp->phy_flags = 0;
13011         rc = bnxt_hwrm_phy_qcaps(bp);
13012         if (rc) {
13013                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13014                            rc);
13015                 return rc;
13016         }
13017         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13018                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13019         else
13020                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13021         if (!fw_dflt)
13022                 return 0;
13023
13024         mutex_lock(&bp->link_lock);
13025         rc = bnxt_update_link(bp, false);
13026         if (rc) {
13027                 mutex_unlock(&bp->link_lock);
13028                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13029                            rc);
13030                 return rc;
13031         }
13032
13033         /* Older firmware does not have supported_auto_speeds, so assume
13034          * that all supported speeds can be autonegotiated.
13035          */
13036         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13037                 link_info->support_auto_speeds = link_info->support_speeds;
13038
13039         bnxt_init_ethtool_link_settings(bp);
13040         mutex_unlock(&bp->link_lock);
13041         return 0;
13042 }
13043
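/* Return the number of MSI-X vectors supported by the device, or 1 if
 * MSI-X is not available.
 */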
13044 static int bnxt_get_max_irq(struct pci_dev *pdev)
13045 {
13046         u16 ctrl;
13047
13048         if (!pdev->msix_cap)
13049                 return 1;
13050
13051         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13052         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13053 }
13054
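/* Compute the maximum usable RX, TX and completion rings from the limits
 * in bp->hw_resc, accounting for ULP MSI-X usage, aggregation rings and
 * ring group limits.
 */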
13055 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13056                                 int *max_cp)
13057 {
13058         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13059         int max_ring_grps = 0, max_irq;
13060
13061         *max_tx = hw_resc->max_tx_rings;
13062         *max_rx = hw_resc->max_rx_rings;
13063         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13064         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13065                         bnxt_get_ulp_msix_num(bp),
13066                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13067         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13068                 *max_cp = min_t(int, *max_cp, max_irq);
13069         max_ring_grps = hw_resc->max_hw_ring_grps;
13070         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13071                 *max_cp -= 1;
13072                 *max_rx -= 2;
13073         }
13074         if (bp->flags & BNXT_FLAG_AGG_RINGS)
13075                 *max_rx >>= 1;
13076         if (bp->flags & BNXT_FLAG_CHIP_P5) {
13077                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13078                 /* On P5 chips, the max_cp output param should be the available NQ count */
13079                 *max_cp = max_irq;
13080         }
13081         *max_rx = min_t(int, *max_rx, max_ring_grps);
13082 }
13083
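/* Return the maximum usable RX and TX rings, trimmed against the available
 * completion rings.  Returns -ENOMEM if no rings are available.
 */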
13084 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13085 {
13086         int rx, tx, cp;
13087
13088         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13089         *max_rx = rx;
13090         *max_tx = tx;
13091         if (!rx || !tx || !cp)
13092                 return -ENOMEM;
13093
13094         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13095 }
13096
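/* Like bnxt_get_max_rings(), but if rings are scarce, fall back to
 * disabling aggregation rings, and set aside minimum completion rings,
 * stat contexts and IRQs for RoCE when that capability is present.
 */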
13097 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13098                                bool shared)
13099 {
13100         int rc;
13101
13102         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13103         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13104                 /* Not enough rings, try disabling agg rings. */
13105                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13106                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13107                 if (rc) {
13108                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
13109                         bp->flags |= BNXT_FLAG_AGG_RINGS;
13110                         return rc;
13111                 }
13112                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13113                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13114                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13115                 bnxt_set_ring_params(bp);
13116         }
13117
13118         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13119                 int max_cp, max_stat, max_irq;
13120
13121                 /* Reserve minimum resources for RoCE */
13122                 max_cp = bnxt_get_max_func_cp_rings(bp);
13123                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13124                 max_irq = bnxt_get_max_func_irqs(bp);
13125                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13126                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13127                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13128                         return 0;
13129
13130                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13131                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13132                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13133                 max_cp = min_t(int, max_cp, max_irq);
13134                 max_cp = min_t(int, max_cp, max_stat);
13135                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13136                 if (rc)
13137                         rc = 0;
13138         }
13139         return rc;
13140 }
13141
13142 /* In the initial default shared ring setting, each shared ring must have
13143  * an RX/TX ring pair.
13144  */
13145 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13146 {
13147         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13148         bp->rx_nr_rings = bp->cp_nr_rings;
13149         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13150         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13151 }
13152
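/* Choose and reserve the default number of RX/TX/completion rings, capped
 * by the hardware maximums and, on multi-port cards, by the number of
 * online CPUs per port.
 */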
13153 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13154 {
13155         int dflt_rings, max_rx_rings, max_tx_rings, rc;
13156
13157         if (!bnxt_can_reserve_rings(bp))
13158                 return 0;
13159
13160         if (sh)
13161                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13162         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13163         /* Reduce default rings on multi-port cards so that total default
13164          * rings do not exceed CPU count.
13165          */
13166         if (bp->port_count > 1) {
13167                 int max_rings =
13168                         max_t(int, num_online_cpus() / bp->port_count, 1);
13169
13170                 dflt_rings = min_t(int, dflt_rings, max_rings);
13171         }
13172         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13173         if (rc)
13174                 return rc;
13175         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13176         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13177         if (sh)
13178                 bnxt_trim_dflt_sh_rings(bp);
13179         else
13180                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13181         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13182
13183         rc = __bnxt_reserve_rings(bp);
13184         if (rc && rc != -ENODEV)
13185                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13186         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13187         if (sh)
13188                 bnxt_trim_dflt_sh_rings(bp);
13189
13190         /* Rings may have been trimmed, re-reserve the trimmed rings. */
13191         if (bnxt_need_reserve_rings(bp)) {
13192                 rc = __bnxt_reserve_rings(bp);
13193                 if (rc && rc != -ENODEV)
13194                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13195                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13196         }
13197         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13198                 bp->rx_nr_rings++;
13199                 bp->cp_nr_rings++;
13200         }
13201         if (rc) {
13202                 bp->tx_nr_rings = 0;
13203                 bp->rx_nr_rings = 0;
13204         }
13205         return rc;
13206 }
13207
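/* Set up default rings and the interrupt mode if no TX rings have been
 * configured yet; also enable NTUPLE filtering when RFS is supported and
 * capable.
 */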
13208 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13209 {
13210         int rc;
13211
13212         if (bp->tx_nr_rings)
13213                 return 0;
13214
13215         bnxt_ulp_irq_stop(bp);
13216         bnxt_clear_int_mode(bp);
13217         rc = bnxt_set_dflt_rings(bp, true);
13218         if (rc) {
13219                 if (BNXT_VF(bp) && rc == -ENODEV)
13220                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13221                 else
13222                         netdev_err(bp->dev, "Not enough rings available.\n");
13223                 goto init_dflt_ring_err;
13224         }
13225         rc = bnxt_init_int_mode(bp);
13226         if (rc)
13227                 goto init_dflt_ring_err;
13228
13229         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13230         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13231                 bp->flags |= BNXT_FLAG_RFS;
13232                 bp->dev->features |= NETIF_F_NTUPLE;
13233         }
13234 init_dflt_ring_err:
13235         bnxt_ulp_irq_restart(bp, rc);
13236         return rc;
13237 }
13238
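/* Re-query function capabilities and re-initialize the interrupt mode
 * after PF resources have changed; reopens the netdev if it was running.
 */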
13239 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13240 {
13241         int rc;
13242
13243         ASSERT_RTNL();
13244         bnxt_hwrm_func_qcaps(bp);
13245
13246         if (netif_running(bp->dev))
13247                 __bnxt_close_nic(bp, true, false);
13248
13249         bnxt_ulp_irq_stop(bp);
13250         bnxt_clear_int_mode(bp);
13251         rc = bnxt_init_int_mode(bp);
13252         bnxt_ulp_irq_restart(bp, rc);
13253
13254         if (netif_running(bp->dev)) {
13255                 if (rc)
13256                         dev_close(bp->dev);
13257                 else
13258                         rc = bnxt_open_nic(bp, true, false);
13259         }
13260
13261         return rc;
13262 }
13263
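/* Set the netdev MAC address: the PF uses the firmware-provided MAC; a VF
 * uses the administratively assigned MAC if valid, otherwise a random
 * address, and validates the choice with bnxt_approve_mac().
 */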
13264 static int bnxt_init_mac_addr(struct bnxt *bp)
13265 {
13266         int rc = 0;
13267
13268         if (BNXT_PF(bp)) {
13269                 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13270         } else {
13271 #ifdef CONFIG_BNXT_SRIOV
13272                 struct bnxt_vf_info *vf = &bp->vf;
13273                 bool strict_approval = true;
13274
13275                 if (is_valid_ether_addr(vf->mac_addr)) {
13276                         /* overwrite netdev dev_addr with admin VF MAC */
13277                         eth_hw_addr_set(bp->dev, vf->mac_addr);
13278                         /* Older PF driver or firmware may not approve this
13279                          * correctly.
13280                          */
13281                         strict_approval = false;
13282                 } else {
13283                         eth_hw_addr_random(bp->dev);
13284                 }
13285                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13286 #endif
13287         }
13288         return rc;
13289 }
13290
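/* Read the board part number and serial number from the PCI VPD. */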
13291 static void bnxt_vpd_read_info(struct bnxt *bp)
13292 {
13293         struct pci_dev *pdev = bp->pdev;
13294         unsigned int vpd_size, kw_len;
13295         int pos, size;
13296         u8 *vpd_data;
13297
13298         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13299         if (IS_ERR(vpd_data)) {
13300                 pci_warn(pdev, "Unable to read VPD\n");
13301                 return;
13302         }
13303
13304         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13305                                            PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13306         if (pos < 0)
13307                 goto read_sn;
13308
13309         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13310         memcpy(bp->board_partno, &vpd_data[pos], size);
13311
13312 read_sn:
13313         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13314                                            PCI_VPD_RO_KEYWORD_SERIALNO,
13315                                            &kw_len);
13316         if (pos < 0)
13317                 goto exit;
13318
13319         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13320         memcpy(bp->board_serialno, &vpd_data[pos], size);
13321 exit:
13322         kfree(vpd_data);
13323 }
13324
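/* Read the PCIe Device Serial Number into @dsn and mark it valid. */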
13325 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13326 {
13327         struct pci_dev *pdev = bp->pdev;
13328         u64 qword;
13329
13330         qword = pci_get_dsn(pdev);
13331         if (!qword) {
13332                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13333                 return -EOPNOTSUPP;
13334         }
13335
13336         put_unaligned_le64(qword, dsn);
13337
13338         bp->flags |= BNXT_FLAG_DSN_VALID;
13339         return 0;
13340 }
13341
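/* Map the doorbell BAR (BAR 2) using the previously determined bp->db_size. */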
13342 static int bnxt_map_db_bar(struct bnxt *bp)
13343 {
13344         if (!bp->db_size)
13345                 return -ENODEV;
13346         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13347         if (!bp->bar1)
13348                 return -ENOMEM;
13349         return 0;
13350 }
13351
13352 void bnxt_print_device_info(struct bnxt *bp)
13353 {
13354         netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13355                     board_info[bp->board_idx].name,
13356                     (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13357
13358         pcie_print_link_status(bp->pdev);
13359 }
13360
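/* PCI probe entry point: allocate the netdev, initialize the board and the
 * firmware interface, set up features, rings and interrupts, and register
 * the netdev and devlink objects.
 */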
13361 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13362 {
13363         struct net_device *dev;
13364         struct bnxt *bp;
13365         int rc, max_irqs;
13366
13367         if (pci_is_bridge(pdev))
13368                 return -ENODEV;
13369
13370         /* Clear any DMA transactions left pending by the crashed kernel
13371          * while loading the driver in the capture (kdump) kernel.
13372          */
13373         if (is_kdump_kernel()) {
13374                 pci_clear_master(pdev);
13375                 pcie_flr(pdev);
13376         }
13377
13378         max_irqs = bnxt_get_max_irq(pdev);
13379         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13380         if (!dev)
13381                 return -ENOMEM;
13382
13383         bp = netdev_priv(dev);
13384         bp->board_idx = ent->driver_data;
13385         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13386         bnxt_set_max_func_irqs(bp, max_irqs);
13387
13388         if (bnxt_vf_pciid(bp->board_idx))
13389                 bp->flags |= BNXT_FLAG_VF;
13390
13391         if (pdev->msix_cap)
13392                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13393
13394         rc = bnxt_init_board(pdev, dev);
13395         if (rc < 0)
13396                 goto init_err_free;
13397
13398         dev->netdev_ops = &bnxt_netdev_ops;
13399         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13400         dev->ethtool_ops = &bnxt_ethtool_ops;
13401         pci_set_drvdata(pdev, dev);
13402
13403         rc = bnxt_alloc_hwrm_resources(bp);
13404         if (rc)
13405                 goto init_err_pci_clean;
13406
13407         mutex_init(&bp->hwrm_cmd_lock);
13408         mutex_init(&bp->link_lock);
13409
13410         rc = bnxt_fw_init_one_p1(bp);
13411         if (rc)
13412                 goto init_err_pci_clean;
13413
13414         if (BNXT_PF(bp))
13415                 bnxt_vpd_read_info(bp);
13416
13417         if (BNXT_CHIP_P5(bp)) {
13418                 bp->flags |= BNXT_FLAG_CHIP_P5;
13419                 if (BNXT_CHIP_SR2(bp))
13420                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13421         }
13422
13423         rc = bnxt_alloc_rss_indir_tbl(bp);
13424         if (rc)
13425                 goto init_err_pci_clean;
13426
13427         rc = bnxt_fw_init_one_p2(bp);
13428         if (rc)
13429                 goto init_err_pci_clean;
13430
13431         rc = bnxt_map_db_bar(bp);
13432         if (rc) {
13433                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13434                         rc);
13435                 goto init_err_pci_clean;
13436         }
13437
13438         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13439                            NETIF_F_TSO | NETIF_F_TSO6 |
13440                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13441                            NETIF_F_GSO_IPXIP4 |
13442                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13443                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13444                            NETIF_F_RXCSUM | NETIF_F_GRO;
13445
13446         if (BNXT_SUPPORTS_TPA(bp))
13447                 dev->hw_features |= NETIF_F_LRO;
13448
13449         dev->hw_enc_features =
13450                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13451                         NETIF_F_TSO | NETIF_F_TSO6 |
13452                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13453                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13454                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13455         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13456
13457         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13458                                     NETIF_F_GSO_GRE_CSUM;
13459         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13460         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13461                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13462         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13463                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13464         if (BNXT_SUPPORTS_TPA(bp))
13465                 dev->hw_features |= NETIF_F_GRO_HW;
13466         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13467         if (dev->features & NETIF_F_GRO_HW)
13468                 dev->features &= ~NETIF_F_LRO;
13469         dev->priv_flags |= IFF_UNICAST_FLT;
13470
13471 #ifdef CONFIG_BNXT_SRIOV
13472         init_waitqueue_head(&bp->sriov_cfg_wait);
13473 #endif
13474         if (BNXT_SUPPORTS_TPA(bp)) {
13475                 bp->gro_func = bnxt_gro_func_5730x;
13476                 if (BNXT_CHIP_P4(bp))
13477                         bp->gro_func = bnxt_gro_func_5731x;
13478                 else if (BNXT_CHIP_P5(bp))
13479                         bp->gro_func = bnxt_gro_func_5750x;
13480         }
13481         if (!BNXT_CHIP_P4_PLUS(bp))
13482                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13483
13484         rc = bnxt_init_mac_addr(bp);
13485         if (rc) {
13486                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13487                 rc = -EADDRNOTAVAIL;
13488                 goto init_err_pci_clean;
13489         }
13490
13491         if (BNXT_PF(bp)) {
13492                 /* Read the adapter's DSN to use as the eswitch switch_id */
13493                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13494         }
13495
13496         /* MTU range: 60 - FW defined max */
13497         dev->min_mtu = ETH_ZLEN;
13498         dev->max_mtu = bp->max_mtu;
13499
13500         rc = bnxt_probe_phy(bp, true);
13501         if (rc)
13502                 goto init_err_pci_clean;
13503
13504         bnxt_set_rx_skb_mode(bp, false);
13505         bnxt_set_tpa_flags(bp);
13506         bnxt_set_ring_params(bp);
13507         rc = bnxt_set_dflt_rings(bp, true);
13508         if (rc) {
13509                 if (BNXT_VF(bp) && rc == -ENODEV) {
13510                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13511                 } else {
13512                         netdev_err(bp->dev, "Not enough rings available.\n");
13513                         rc = -ENOMEM;
13514                 }
13515                 goto init_err_pci_clean;
13516         }
13517
13518         bnxt_fw_init_one_p3(bp);
13519
13520         bnxt_init_dflt_coal(bp);
13521
13522         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13523                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13524
13525         rc = bnxt_init_int_mode(bp);
13526         if (rc)
13527                 goto init_err_pci_clean;
13528
13529         /* No TC has been set yet and rings may have been trimmed due to
13530          * limited MSIX, so we re-initialize the TX rings per TC.
13531          */
13532         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13533
13534         if (BNXT_PF(bp)) {
13535                 if (!bnxt_pf_wq) {
13536                         bnxt_pf_wq =
13537                                 create_singlethread_workqueue("bnxt_pf_wq");
13538                         if (!bnxt_pf_wq) {
13539                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13540                                 rc = -ENOMEM;
13541                                 goto init_err_pci_clean;
13542                         }
13543                 }
13544                 rc = bnxt_init_tc(bp);
13545                 if (rc)
13546                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13547                                    rc);
13548         }
13549
13550         bnxt_inv_fw_health_reg(bp);
13551         rc = bnxt_dl_register(bp);
13552         if (rc)
13553                 goto init_err_dl;
13554
13555         rc = register_netdev(dev);
13556         if (rc)
13557                 goto init_err_cleanup;
13558
13559         if (BNXT_PF(bp))
13560                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13561         bnxt_dl_fw_reporters_create(bp);
13562
13563         bnxt_print_device_info(bp);
13564
13565         pci_save_state(pdev);
13566         return 0;
13567
13568 init_err_cleanup:
13569         bnxt_dl_unregister(bp);
13570 init_err_dl:
13571         bnxt_shutdown_tc(bp);
13572         bnxt_clear_int_mode(bp);
13573
13574 init_err_pci_clean:
13575         bnxt_hwrm_func_drv_unrgtr(bp);
13576         bnxt_free_hwrm_resources(bp);
13577         bnxt_ethtool_free(bp);
13578         bnxt_ptp_clear(bp);
13579         kfree(bp->ptp_cfg);
13580         bp->ptp_cfg = NULL;
13581         kfree(bp->fw_health);
13582         bp->fw_health = NULL;
13583         bnxt_cleanup_pci(bp);
13584         bnxt_free_ctx_mem(bp);
13585         kfree(bp->ctx);
13586         bp->ctx = NULL;
13587         kfree(bp->rss_indir_tbl);
13588         bp->rss_indir_tbl = NULL;
13589
13590 init_err_free:
13591         free_netdev(dev);
13592         return rc;
13593 }
13594
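/* PCI shutdown handler: close the netdev, quiesce ULPs and interrupts, and
 * arm wake-on-LAN before power-off if configured.
 */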
13595 static void bnxt_shutdown(struct pci_dev *pdev)
13596 {
13597         struct net_device *dev = pci_get_drvdata(pdev);
13598         struct bnxt *bp;
13599
13600         if (!dev)
13601                 return;
13602
13603         rtnl_lock();
13604         bp = netdev_priv(dev);
13605         if (!bp)
13606                 goto shutdown_exit;
13607
13608         if (netif_running(dev))
13609                 dev_close(dev);
13610
13611         bnxt_ulp_shutdown(bp);
13612         bnxt_clear_int_mode(bp);
13613         pci_disable_device(pdev);
13614
13615         if (system_state == SYSTEM_POWER_OFF) {
13616                 pci_wake_from_d3(pdev, bp->wol);
13617                 pci_set_power_state(pdev, PCI_D3hot);
13618         }
13619
13620 shutdown_exit:
13621         rtnl_unlock();
13622 }
13623
13624 #ifdef CONFIG_PM_SLEEP
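/* System suspend: stop ULPs, close the netdev, unregister the driver from
 * firmware and release PCI and context memory resources.
 */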
13625 static int bnxt_suspend(struct device *device)
13626 {
13627         struct net_device *dev = dev_get_drvdata(device);
13628         struct bnxt *bp = netdev_priv(dev);
13629         int rc = 0;
13630
13631         rtnl_lock();
13632         bnxt_ulp_stop(bp);
13633         if (netif_running(dev)) {
13634                 netif_device_detach(dev);
13635                 rc = bnxt_close(dev);
13636         }
13637         bnxt_hwrm_func_drv_unrgtr(bp);
13638         pci_disable_device(bp->pdev);
13639         bnxt_free_ctx_mem(bp);
13640         kfree(bp->ctx);
13641         bp->ctx = NULL;
13642         rtnl_unlock();
13643         return rc;
13644 }
13645
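/* System resume: re-enable the PCI device, re-establish the firmware
 * interface, and reopen the netdev if it was running before suspend.
 */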
13646 static int bnxt_resume(struct device *device)
13647 {
13648         struct net_device *dev = dev_get_drvdata(device);
13649         struct bnxt *bp = netdev_priv(dev);
13650         int rc = 0;
13651
13652         rtnl_lock();
13653         rc = pci_enable_device(bp->pdev);
13654         if (rc) {
13655                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13656                            rc);
13657                 goto resume_exit;
13658         }
13659         pci_set_master(bp->pdev);
13660         if (bnxt_hwrm_ver_get(bp)) {
13661                 rc = -ENODEV;
13662                 goto resume_exit;
13663         }
13664         rc = bnxt_hwrm_func_reset(bp);
13665         if (rc) {
13666                 rc = -EBUSY;
13667                 goto resume_exit;
13668         }
13669
13670         rc = bnxt_hwrm_func_qcaps(bp);
13671         if (rc)
13672                 goto resume_exit;
13673
13674         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13675                 rc = -ENODEV;
13676                 goto resume_exit;
13677         }
13678
13679         bnxt_get_wol_settings(bp);
13680         if (netif_running(dev)) {
13681                 rc = bnxt_open(dev);
13682                 if (!rc)
13683                         netif_device_attach(dev);
13684         }
13685
13686 resume_exit:
13687         bnxt_ulp_start(bp, rc);
13688         if (!rc)
13689                 bnxt_reenable_sriov(bp);
13690         rtnl_unlock();
13691         return rc;
13692 }
13693
13694 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13695 #define BNXT_PM_OPS (&bnxt_pm_ops)
13696
13697 #else
13698
13699 #define BNXT_PM_OPS NULL
13700
13701 #endif /* CONFIG_PM_SLEEP */
13702
13703 /**
13704  * bnxt_io_error_detected - called when PCI error is detected
13705  * @pdev: Pointer to PCI device
13706  * @state: The current pci connection state
13707  *
13708  * This function is called after a PCI bus error affecting
13709  * this device has been detected.
13710  */
13711 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13712                                                pci_channel_state_t state)
13713 {
13714         struct net_device *netdev = pci_get_drvdata(pdev);
13715         struct bnxt *bp = netdev_priv(netdev);
13716
13717         netdev_info(netdev, "PCI I/O error detected\n");
13718
13719         rtnl_lock();
13720         netif_device_detach(netdev);
13721
13722         bnxt_ulp_stop(bp);
13723
13724         if (state == pci_channel_io_perm_failure) {
13725                 rtnl_unlock();
13726                 return PCI_ERS_RESULT_DISCONNECT;
13727         }
13728
13729         if (state == pci_channel_io_frozen)
13730                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13731
13732         if (netif_running(netdev))
13733                 bnxt_close(netdev);
13734
13735         if (pci_is_enabled(pdev))
13736                 pci_disable_device(pdev);
13737         bnxt_free_ctx_mem(bp);
13738         kfree(bp->ctx);
13739         bp->ctx = NULL;
13740         rtnl_unlock();
13741
13742         /* Request a slot reset. */
13743         return PCI_ERS_RESULT_NEED_RESET;
13744 }
13745
13746 /**
13747  * bnxt_io_slot_reset - called after the pci bus has been reset.
13748  * @pdev: Pointer to PCI device
13749  *
13750  * Restart the card from scratch, as if from a cold-boot.
13751  * At this point, the card has experienced a hard reset,
13752  * followed by fixups by BIOS, and has its config space
13753  * set up identically to what it was at cold boot.
13754  */
13755 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13756 {
13757         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13758         struct net_device *netdev = pci_get_drvdata(pdev);
13759         struct bnxt *bp = netdev_priv(netdev);
13760         int err = 0, off;
13761
13762         netdev_info(bp->dev, "PCI Slot Reset\n");
13763
13764         rtnl_lock();
13765
13766         if (pci_enable_device(pdev)) {
13767                 dev_err(&pdev->dev,
13768                         "Cannot re-enable PCI device after reset.\n");
13769         } else {
13770                 pci_set_master(pdev);
13771                 /* Upon fatal error, the device's internal logic that latches
13772                  * the BAR values is reset and is restored only by rewriting
13773                  * the BARs.
13774                  *
13775                  * Since pci_restore_state() does not rewrite the BARs when the
13776                  * values match those saved earlier, the driver must write the
13777                  * BARs to 0 to force a restore after a fatal error.
13778                  */
13779                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13780                                        &bp->state)) {
13781                         for (off = PCI_BASE_ADDRESS_0;
13782                              off <= PCI_BASE_ADDRESS_5; off += 4)
13783                                 pci_write_config_dword(bp->pdev, off, 0);
13784                 }
13785                 pci_restore_state(pdev);
13786                 pci_save_state(pdev);
13787
13788                 err = bnxt_hwrm_func_reset(bp);
13789                 if (!err)
13790                         result = PCI_ERS_RESULT_RECOVERED;
13791         }
13792
13793         rtnl_unlock();
13794
13795         return result;
13796 }
13797
13798 /**
13799  * bnxt_io_resume - called when traffic can start flowing again.
13800  * @pdev: Pointer to PCI device
13801  *
13802  * This callback is called when the error recovery driver tells
13803  * us that it's OK to resume normal operation.
13804  */
13805 static void bnxt_io_resume(struct pci_dev *pdev)
13806 {
13807         struct net_device *netdev = pci_get_drvdata(pdev);
13808         struct bnxt *bp = netdev_priv(netdev);
13809         int err;
13810
13811         netdev_info(bp->dev, "PCI Slot Resume\n");
13812         rtnl_lock();
13813
13814         err = bnxt_hwrm_func_qcaps(bp);
13815         if (!err && netif_running(netdev))
13816                 err = bnxt_open(netdev);
13817
13818         bnxt_ulp_start(bp, err);
13819         if (!err) {
13820                 bnxt_reenable_sriov(bp);
13821                 netif_device_attach(netdev);
13822         }
13823
13824         rtnl_unlock();
13825 }
13826
13827 static const struct pci_error_handlers bnxt_err_handler = {
13828         .error_detected = bnxt_io_error_detected,
13829         .slot_reset     = bnxt_io_slot_reset,
13830         .resume         = bnxt_io_resume
13831 };
13832
13833 static struct pci_driver bnxt_pci_driver = {
13834         .name           = DRV_MODULE_NAME,
13835         .id_table       = bnxt_pci_tbl,
13836         .probe          = bnxt_init_one,
13837         .remove         = bnxt_remove_one,
13838         .shutdown       = bnxt_shutdown,
13839         .driver.pm      = BNXT_PM_OPS,
13840         .err_handler    = &bnxt_err_handler,
13841 #if defined(CONFIG_BNXT_SRIOV)
13842         .sriov_configure = bnxt_sriov_configure,
13843 #endif
13844 };
13845
13846 static int __init bnxt_init(void)
13847 {
13848         bnxt_debug_init();
13849         return pci_register_driver(&bnxt_pci_driver);
13850 }
13851
13852 static void __exit bnxt_exit(void)
13853 {
13854         pci_unregister_driver(&bnxt_pci_driver);
13855         if (bnxt_pf_wq)
13856                 destroy_workqueue(bnxt_pf_wq);
13857         bnxt_debug_exit();
13858 }
13859
13860 module_init(bnxt_init);
13861 module_exit(bnxt_exit);