/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
                                 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
        [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
        [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
        [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
        { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
        { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
        { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
        ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
        ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
        ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
        ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
                idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
                idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
                    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)                                          \
        bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |             \
                    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |              \
                    DB_RING_IDX(db, idx), (db)->doorbell)

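/* Doorbell helpers below pick the doorbell format for the chip
 * generation: P7 and P5+ chips take a 64-bit write keyed by a
 * DBR_TYPE_* value, while older chips use the legacy 32-bit CQ
 * doorbell.
 */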
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P7)
                BNXT_DB_NQ_P7(db, idx);
        else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
                            DB_RING_IDX(db, idx), db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
        set_bit(event, &bp->sp_event);
        __bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                __bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                          u16 curr)
{
        struct bnxt_napi *bnapi = txr->bnapi;

        if (bnapi->tx_fault)
                return;

        netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
                   txr->txq_index, txr->tx_hw_cons,
                   txr->tx_cons, txr->tx_prod, curr);
        WARN_ON_ONCE(1);
        bnapi->tx_fault = 1;
        bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

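/* TX length-hint table, indexed by packet length in 512-byte units
 * (length >> 9 in bnxt_start_xmit()).  The LHINT flags presumably let
 * the hardware size its buffer fetch for the packet.
 */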
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

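/* Return the CFA action for a TX skb.  A METADATA_HW_PORT_MUX dst is
 * attached by the VF representor transmit path; its port_id selects
 * the destination function, and 0 means no special action.
 */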
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                             u16 prod)
{
        /* Sync BD data before updating doorbell */
        wmb();
        bnxt_db_write(bp, &txr->tx_db, prod);
        txr->kick_pending = 0;
}

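/* Main transmit entry point.  Small packets on an otherwise empty ring
 * are written inline through the doorbell BAR ("TX push"); everything
 * else takes the normal_tx path and is DMA-mapped BD by BD.
 */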
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd, *txbd0;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                dev_core_stats_tx_dropped_inc(dev);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                /* We must have raced with NAPI cleanup */
                if (net_ratelimit() && txr->kick_pending)
                        netif_warn(bp, tx_err, dev,
                                   "bnxt: ring busy w/ flush pending!\n");
                if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
                                        bp->tx_wake_thresh))
                        return NETDEV_TX_BUSY;
        }

        if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
                goto tx_free;

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

        tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
                        if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
                                            &ptp->tx_hdr_off)) {
                                if (vlan_tag_flags)
                                        ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
                                atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }
        }

        if (unlikely(skb->no_fcs))
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
            !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
                prod = NEXT_TX(prod);
                tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
                                    DB_RING_IDX(&txr->tx_db, prod));
                WRITE_ONCE(txr->tx_prod, prod);

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad))
                        /* SKB already freed. */
                        goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                goto tx_free;

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);
        txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
                u32 hdr_len;

                if (skb->encapsulation) {
                        if (udp_gso)
                                hdr_len = skb_inner_transport_offset(skb) +
                                          sizeof(struct udphdr);
                        else
                                hdr_len = skb_inner_tcp_all_headers(skb);
                } else if (udp_gso) {
                        hdr_len = skb_transport_offset(skb) +
                                  sizeof(struct udphdr);
                } else {
                        hdr_len = skb_tcp_all_headers(skb);
                }

                txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        txbd0 = txbd;
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        skb_tx_timestamp(skb);

        prod = NEXT_TX(prod);
        WRITE_ONCE(txr->tx_prod, prod);

        if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
                bnxt_txr_db_kick(bp, txr, prod);
        } else {
                if (free_size >= bp->tx_wake_thresh)
                        txbd0->tx_bd_len_flags_type |=
                                cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
                txr->kick_pending = 1;
        }

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push) {
                        txbd0->tx_bd_len_flags_type &=
                                cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
                        bnxt_txr_db_kick(bp, txr, prod);
                }

                netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
                                   bp->tx_wake_thresh);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        if (BNXT_TX_PTP_IS_SET(lflags))
                atomic_inc(&bp->ptp_cfg->tx_avail);

        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), DMA_TO_DEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);
        }

tx_free:
        dev_kfree_skb_any(skb);
tx_kick_pending:
        if (txr->kick_pending)
                bnxt_txr_db_kick(bp, txr, txr->tx_prod);
        txr->tx_buf_ring[txr->tx_prod].skb = NULL;
        dev_core_stats_tx_dropped_inc(dev);
        return NETDEV_TX_OK;
}

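/* Reclaim TX descriptors up to tx_hw_cons: unmap the buffers, let the
 * PTP worker take over timestamped skbs on P5 chips, free the rest,
 * and re-wake the queue once enough descriptors are available.
 */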
static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                          int budget)
{
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        struct pci_dev *pdev = bp->pdev;
        u16 hw_cons = txr->tx_hw_cons;
        unsigned int tx_bytes = 0;
        u16 cons = txr->tx_cons;
        int tx_pkts = 0;

        while (RING_TX(bp, cons) != hw_cons) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (unlikely(!skb)) {
                        bnxt_sched_reset_txr(bp, txr, cons);
                        return;
                }

                tx_pkts++;
                tx_bytes += skb->len;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), DMA_TO_DEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
                        dma_unmap_page(&pdev->dev,
                                       dma_unmap_addr(tx_buf, mapping),
                                       skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                       DMA_TO_DEVICE);
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (BNXT_CHIP_P5(bp)) {
                                /* PTP worker takes ownership of the skb */
                                if (!bnxt_get_tx_ts_p5(bp, skb))
                                        skb = NULL;
                                else
                                        atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }

next_tx_int:
                cons = NEXT_TX(cons);

                dev_consume_skb_any(skb);
        }

        WRITE_ONCE(txr->tx_cons, cons);

        __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
                                   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
                                   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
        struct bnxt_tx_ring_info *txr;
        int i;

        bnxt_for_each_napi_tx(i, bnapi, txr) {
                if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
                        __bnxt_tx_int(bp, txr, budget);
        }
        bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

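/* Allocate an RX buffer page from the ring's page_pool.  When the
 * system page size is larger than BNXT_RX_PAGE_SIZE, a fragment of a
 * page is handed out instead so that one page backs several RX
 * buffers.
 */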
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         unsigned int *offset,
                                         gfp_t gfp)
{
        struct page *page;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
                                                BNXT_RX_PAGE_SIZE);
        } else {
                page = page_pool_dev_alloc_pages(rxr->page_pool);
                *offset = 0;
        }
        if (!page)
                return NULL;

        *mapping = page_pool_get_dma_addr(page) + *offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        if (gfp == GFP_ATOMIC)
                data = napi_alloc_frag(bp->rx_buf_size);
        else
                data = netdev_alloc_frag(bp->rx_buf_size);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                skb_free_frag(data);
                data = NULL;
        }
        return data;
}

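/* Refill one RX BD at @prod: a page_pool page in page mode, otherwise
 * a page fragment from napi_alloc_frag()/netdev_alloc_frag().  The DMA
 * address is recorded in both the sw ring entry and the hardware BD.
 */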
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                unsigned int offset;
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

                if (!page)
                        return -ENOMEM;

                mapping += bp->rx_dma_offset;
                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct bnxt *bp = rxr->bnapi->bp;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

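/* rx_agg_bmap tracks which sw aggregation slots are in use.  Find the
 * next free slot at or after @idx, wrapping to the start of the bitmap
 * if the search runs off the end.
 */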
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

        if (!page)
                return -ENOMEM;

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 cons, void *data, u8 *data_ptr,
                                              dma_addr_t dma_addr,
                                              unsigned int offset_and_len)
{
        unsigned int len = offset_and_len & 0xffff;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
                                bp->rx_dir);
        skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
        skb_reserve(skb, bp->rx_offset);
        __skb_put(skb, len);

        return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
                                bp->rx_dir);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }

        skb_mark_for_recycle(skb);
        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = napi_build_skb(data, bp->rx_buf_size);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                skb_free_frag(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

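/* Collect @agg_bufs aggregation buffers into @shinfo frags; shared by
 * the skb and XDP receive paths.  Each consumed page is replaced via
 * bnxt_alloc_rx_page(); if that fails, the remaining buffers are given
 * back with bnxt_reuse_rx_agg_bufs() and 0 is returned.
 */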
static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
                               struct bnxt_cp_ring_info *cpr,
                               struct skb_shared_info *shinfo,
                               u16 idx, u32 agg_bufs, bool tpa,
                               struct xdp_buff *xdp)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u32 i, total_frag_len = 0;
        bool p5_tpa = false;

        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                skb_frag_t *frag = &shinfo->frags[i];
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_frag_fill_page_desc(frag, cons_rx_buf->page,
                                        cons_rx_buf->offset, frag_len);
                shinfo->nr_frags = i + 1;
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (xdp && page_is_pfmemalloc(page))
                        xdp_buff_set_frag_pfmemalloc(xdp);

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        --shinfo->nr_frags;
                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return 0;
                }

                dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                        bp->rx_dir);

                total_frag_len += frag_len;
                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
                                             struct bnxt_cp_ring_info *cpr,
                                             struct sk_buff *skb, u16 idx,
                                             u32 agg_bufs, bool tpa)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        u32 total_frag_len = 0;

        total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
                                             agg_bufs, tpa, NULL);
        if (!total_frag_len) {
                skb_mark_for_recycle(skb);
                dev_kfree_skb(skb);
                return NULL;
        }

        skb->data_len += total_frag_len;
        skb->len += total_frag_len;
        skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
        return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
                                 struct bnxt_cp_ring_info *cpr,
                                 struct xdp_buff *xdp, u16 idx,
                                 u32 agg_bufs, bool tpa)
{
        struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
        u32 total_frag_len = 0;

        if (!xdp_buff_has_frags(xdp))
                shinfo->nr_frags = 0;

        total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
                                             idx, agg_bufs, tpa, xdp);
        if (total_frag_len) {
                xdp_buff_set_frags_flag(xdp);
                shinfo->nr_frags = agg_bufs;
                shinfo->xdp_frags_size = total_frag_len;
        }
        return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

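/* Copy-break for small packets: allocate a fresh skb and memcpy the
 * payload out of the RX buffer so the original buffer stays on the
 * ring.  The extra NET_IP_ALIGN bytes presumably keep the IP header
 * aligned in the new skb.
 */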
1256 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1257                                             unsigned int len,
1258                                             dma_addr_t mapping)
1259 {
1260         struct bnxt *bp = bnapi->bp;
1261         struct pci_dev *pdev = bp->pdev;
1262         struct sk_buff *skb;
1263
1264         skb = napi_alloc_skb(&bnapi->napi, len);
1265         if (!skb)
1266                 return NULL;
1267
1268         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1269                                 bp->rx_dir);
1270
1271         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1272                len + NET_IP_ALIGN);
1273
1274         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1275                                    bp->rx_dir);
1276
1277         skb_put(skb, len);
1278         return skb;
1279 }
1280
1281 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1282                            u32 *raw_cons, void *cmp)
1283 {
1284         struct rx_cmp *rxcmp = cmp;
1285         u32 tmp_raw_cons = *raw_cons;
1286         u8 cmp_type, agg_bufs = 0;
1287
1288         cmp_type = RX_CMP_TYPE(rxcmp);
1289
1290         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1291                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1292                             RX_CMP_AGG_BUFS) >>
1293                            RX_CMP_AGG_BUFS_SHIFT;
1294         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1295                 struct rx_tpa_end_cmp *tpa_end = cmp;
1296
1297                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1298                         return 0;
1299
1300                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1301         }
1302
1303         if (agg_bufs) {
1304                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1305                         return -EBUSY;
1306         }
1307         *raw_cons = tmp_raw_cons;
1308         return 0;
1309 }
1310
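/* Map a hardware TPA agg_id to a free software TPA slot.  If the
 * masked index is already in use, fall back to the first free bit in
 * the bitmap, and record the mapping for later lookups.
 */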
1311 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1312 {
1313         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1314         u16 idx = agg_id & MAX_TPA_P5_MASK;
1315
1316         if (test_bit(idx, map->agg_idx_bmap))
1317                 idx = find_first_zero_bit(map->agg_idx_bmap,
1318                                           BNXT_AGG_IDX_BMAP_SIZE);
1319         __set_bit(idx, map->agg_idx_bmap);
1320         map->agg_id_tbl[agg_id] = idx;
1321         return idx;
1322 }
1323
1324 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1325 {
1326         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1327
1328         __clear_bit(idx, map->agg_idx_bmap);
1329 }
1330
1331 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1332 {
1333         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1334
1335         return map->agg_id_tbl[agg_id];
1336 }
1337
1338 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1339                               struct rx_tpa_start_cmp *tpa_start,
1340                               struct rx_tpa_start_cmp_ext *tpa_start1)
1341 {
1342         tpa_info->cfa_code_valid = 1;
1343         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1344         tpa_info->vlan_valid = 0;
1345         if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1346                 tpa_info->vlan_valid = 1;
1347                 tpa_info->metadata =
1348                         le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1349         }
1350 }
1351
1352 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1353                                  struct rx_tpa_start_cmp *tpa_start,
1354                                  struct rx_tpa_start_cmp_ext *tpa_start1)
1355 {
1356         tpa_info->vlan_valid = 0;
1357         if (TPA_START_VLAN_VALID(tpa_start)) {
1358                 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1359                 u32 vlan_proto = ETH_P_8021Q;
1360
1361                 tpa_info->vlan_valid = 1;
1362                 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1363                         vlan_proto = ETH_P_8021AD;
1364                 tpa_info->metadata = vlan_proto << 16 |
1365                                      TPA_START_METADATA0_TCI(tpa_start1);
1366         }
1367 }
1368
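/* Begin a TPA aggregation: keep the buffer holding the packet start in
 * the TPA slot for the duration of the aggregation, recycle the slot's
 * previous buffer to the producer ring, and latch length, hash, gso
 * type and VLAN metadata for use when the TPA_END completion arrives.
 */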
1369 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1370                            u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1371                            struct rx_tpa_start_cmp_ext *tpa_start1)
1372 {
1373         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1374         struct bnxt_tpa_info *tpa_info;
1375         u16 cons, prod, agg_id;
1376         struct rx_bd *prod_bd;
1377         dma_addr_t mapping;
1378
1379         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1380                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1381                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1382         } else {
1383                 agg_id = TPA_START_AGG_ID(tpa_start);
1384         }
1385         cons = tpa_start->rx_tpa_start_cmp_opaque;
1386         prod = rxr->rx_prod;
1387         cons_rx_buf = &rxr->rx_buf_ring[cons];
1388         prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1389         tpa_info = &rxr->rx_tpa[agg_id];
1390
1391         if (unlikely(cons != rxr->rx_next_cons ||
1392                      TPA_START_ERROR(tpa_start))) {
1393                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1394                             cons, rxr->rx_next_cons,
1395                             TPA_START_ERROR_CODE(tpa_start1));
1396                 bnxt_sched_reset_rxr(bp, rxr);
1397                 return;
1398         }
1399         prod_rx_buf->data = tpa_info->data;
1400         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1401
1402         mapping = tpa_info->mapping;
1403         prod_rx_buf->mapping = mapping;
1404
1405         prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1406
1407         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1408
1409         tpa_info->data = cons_rx_buf->data;
1410         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1411         cons_rx_buf->data = NULL;
1412         tpa_info->mapping = cons_rx_buf->mapping;
1413
1414         tpa_info->len =
1415                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1416                                 RX_TPA_START_CMP_LEN_SHIFT;
1417         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1418                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1419                 tpa_info->gso_type = SKB_GSO_TCPV4;
1420                 if (TPA_START_IS_IPV6(tpa_start1))
1421                         tpa_info->gso_type = SKB_GSO_TCPV6;
1422                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1423                 else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
1424                          TPA_START_HASH_TYPE(tpa_start) == 3)
1425                         tpa_info->gso_type = SKB_GSO_TCPV6;
1426                 tpa_info->rss_hash =
1427                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1428         } else {
1429                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1430                 tpa_info->gso_type = 0;
1431                 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1432         }
1433         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1434         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1435         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1436                 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1437         else
1438                 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1439         tpa_info->agg_count = 0;
1440
1441         rxr->rx_prod = NEXT_RX(prod);
1442         cons = RING_RX(bp, NEXT_RX(cons));
1443         rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1444         cons_rx_buf = &rxr->rx_buf_ring[cons];
1445
1446         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1447         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1448         cons_rx_buf->data = NULL;
1449 }
1450
1451 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1452 {
1453         if (agg_bufs)
1454                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1455 }
1456
1457 #ifdef CONFIG_INET
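/* Mark a tunneled GRO packet with the proper UDP tunnel gso_type,
 * depending on whether the outer UDP header carries a checksum.
 */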
1458 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1459 {
1460         struct udphdr *uh = NULL;
1461
1462         if (ip_proto == htons(ETH_P_IP)) {
1463                 struct iphdr *iph = (struct iphdr *)skb->data;
1464
1465                 if (iph->protocol == IPPROTO_UDP)
1466                         uh = (struct udphdr *)(iph + 1);
1467         } else {
1468                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1469
1470                 if (iph->nexthdr == IPPROTO_UDP)
1471                         uh = (struct udphdr *)(iph + 1);
1472         }
1473         if (uh) {
1474                 if (uh->check)
1475                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1476                 else
1477                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1478         }
1479 }
1480 #endif
1481
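/* GRO header fixup for 5731x chips: locate the inner network and
 * transport headers from the offsets encoded in hdr_info (adjusting by
 * 4 bytes for internal loopback packets), recompute the TCP
 * pseudo-header checksum and flag tunnel encapsulation.
 */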
1482 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1483                                            int payload_off, int tcp_ts,
1484                                            struct sk_buff *skb)
1485 {
1486 #ifdef CONFIG_INET
1487         struct tcphdr *th;
1488         int len, nw_off;
1489         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1490         u32 hdr_info = tpa_info->hdr_info;
1491         bool loopback = false;
1492
1493         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1494         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1495         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1496
1497         /* If the packet is an internal loopback packet, the offsets will
1498          * have an extra 4 bytes.
1499          */
1500         if (inner_mac_off == 4) {
1501                 loopback = true;
1502         } else if (inner_mac_off > 4) {
1503                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1504                                             ETH_HLEN - 2));
1505
1506                 /* We only support inner IPv4/IPv6.  If we don't see the
1507                  * correct protocol ID, it must be a loopback packet where
1508                  * the offsets are off by 4.
1509                  */
1510                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1511                         loopback = true;
1512         }
1513         if (loopback) {
1514                 /* internal loopback packet, subtract all offsets by 4 */
1515                 inner_ip_off -= 4;
1516                 inner_mac_off -= 4;
1517                 outer_ip_off -= 4;
1518         }
1519
1520         nw_off = inner_ip_off - ETH_HLEN;
1521         skb_set_network_header(skb, nw_off);
1522         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1523                 struct ipv6hdr *iph = ipv6_hdr(skb);
1524
1525                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1526                 len = skb->len - skb_transport_offset(skb);
1527                 th = tcp_hdr(skb);
1528                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1529         } else {
1530                 struct iphdr *iph = ip_hdr(skb);
1531
1532                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1533                 len = skb->len - skb_transport_offset(skb);
1534                 th = tcp_hdr(skb);
1535                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1536         }
1537
1538         if (inner_mac_off) { /* tunnel */
1539                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1540                                             ETH_HLEN - 2));
1541
1542                 bnxt_gro_tunnel(skb, proto);
1543         }
1544 #endif
1545         return skb;
1546 }
1547
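/* GRO header fixup for 5750x (P5+) chips: header offsets also come
 * from hdr_info, but unlike the 5731x path no pseudo-header checksum
 * rewrite is done.
 */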
1548 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1549                                            int payload_off, int tcp_ts,
1550                                            struct sk_buff *skb)
1551 {
1552 #ifdef CONFIG_INET
1553         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1554         u32 hdr_info = tpa_info->hdr_info;
1555         int iphdr_len, nw_off;
1556
1557         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1558         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1559         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1560
1561         nw_off = inner_ip_off - ETH_HLEN;
1562         skb_set_network_header(skb, nw_off);
1563         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1564                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1565         skb_set_transport_header(skb, nw_off + iphdr_len);
1566
1567         if (inner_mac_off) { /* tunnel */
1568                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1569                                             ETH_HLEN - 2));
1570
1571                 bnxt_gro_tunnel(skb, proto);
1572         }
1573 #endif
1574         return skb;
1575 }
1576
1577 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1578 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1579
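/* GRO header fixup for 5730x chips: hdr_info is not available, so walk
 * back from the hardware-reported payload offset (minus the fixed
 * IPv4/IPv6 header sizes and the 12-byte TCP timestamp option, if
 * present) to find the headers, then redo the pseudo-header checksum.
 */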
1580 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1581                                            int payload_off, int tcp_ts,
1582                                            struct sk_buff *skb)
1583 {
1584 #ifdef CONFIG_INET
1585         struct tcphdr *th;
1586         int len, nw_off, tcp_opt_len = 0;
1587
1588         if (tcp_ts)
1589                 tcp_opt_len = 12;
1590
1591         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1592                 struct iphdr *iph;
1593
1594                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1595                          ETH_HLEN;
1596                 skb_set_network_header(skb, nw_off);
1597                 iph = ip_hdr(skb);
1598                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1599                 len = skb->len - skb_transport_offset(skb);
1600                 th = tcp_hdr(skb);
1601                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1602         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1603                 struct ipv6hdr *iph;
1604
1605                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1606                          ETH_HLEN;
1607                 skb_set_network_header(skb, nw_off);
1608                 iph = ipv6_hdr(skb);
1609                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1610                 len = skb->len - skb_transport_offset(skb);
1611                 th = tcp_hdr(skb);
1612                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1613         } else {
1614                 dev_kfree_skb_any(skb);
1615                 return NULL;
1616         }
1617
1618         if (nw_off) /* tunnel */
1619                 bnxt_gro_tunnel(skb, skb->protocol);
1620 #endif
1621         return skb;
1622 }
1623
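/* Finish GRO processing of a completed TPA aggregation: set the
 * segment count and gso parameters from the TPA_END completions, run
 * the chip-specific header fixup, then complete the GRO session.
 * Single-segment aggregations are returned unchanged.
 */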
1624 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1625                                            struct bnxt_tpa_info *tpa_info,
1626                                            struct rx_tpa_end_cmp *tpa_end,
1627                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1628                                            struct sk_buff *skb)
1629 {
1630 #ifdef CONFIG_INET
1631         int payload_off;
1632         u16 segs;
1633
1634         segs = TPA_END_TPA_SEGS(tpa_end);
1635         if (segs == 1)
1636                 return skb;
1637
1638         NAPI_GRO_CB(skb)->count = segs;
1639         skb_shinfo(skb)->gso_size =
1640                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1641         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1642         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1643                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1644         else
1645                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1646         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1647         if (likely(skb))
1648                 tcp_gro_complete(skb);
1649 #endif
1650         return skb;
1651 }
1652
1653 /* Given the cfa_code of a received packet, determine which
1654  * netdev (vf-rep or PF) the packet is destined to.
1655  */
1656 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1657 {
1658         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1659
1660         /* if vf-rep dev is NULL, the packet must belong to the PF */
1661         return dev ? dev : bp->dev;
1662 }
1663
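/* Handle a TPA_END completion: build one skb for the whole aggregation
 * (copying small packets, otherwise taking over the TPA buffer and
 * attaching the agg pages as frags), apply VLAN, RSS hash and checksum
 * metadata, and optionally hand the skb to GRO.  Returns NULL for
 * fully handled errors, or ERR_PTR(-EBUSY) if more completions are
 * needed.
 */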
1664 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1665                                            struct bnxt_cp_ring_info *cpr,
1666                                            u32 *raw_cons,
1667                                            struct rx_tpa_end_cmp *tpa_end,
1668                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1669                                            u8 *event)
1670 {
1671         struct bnxt_napi *bnapi = cpr->bnapi;
1672         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1673         struct net_device *dev = bp->dev;
1674         u8 *data_ptr, agg_bufs;
1675         unsigned int len;
1676         struct bnxt_tpa_info *tpa_info;
1677         dma_addr_t mapping;
1678         struct sk_buff *skb;
1679         u16 idx = 0, agg_id;
1680         void *data;
1681         bool gro;
1682
1683         if (unlikely(bnapi->in_reset)) {
1684                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1685
1686                 if (rc < 0)
1687                         return ERR_PTR(-EBUSY);
1688                 return NULL;
1689         }
1690
1691         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1692                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1693                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1694                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1695                 tpa_info = &rxr->rx_tpa[agg_id];
1696                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1697                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1698                                     agg_bufs, tpa_info->agg_count);
1699                         agg_bufs = tpa_info->agg_count;
1700                 }
1701                 tpa_info->agg_count = 0;
1702                 *event |= BNXT_AGG_EVENT;
1703                 bnxt_free_agg_idx(rxr, agg_id);
1704                 idx = agg_id;
1705                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1706         } else {
1707                 agg_id = TPA_END_AGG_ID(tpa_end);
1708                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1709                 tpa_info = &rxr->rx_tpa[agg_id];
1710                 idx = RING_CMP(*raw_cons);
1711                 if (agg_bufs) {
1712                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1713                                 return ERR_PTR(-EBUSY);
1714
1715                         *event |= BNXT_AGG_EVENT;
1716                         idx = NEXT_CMP(idx);
1717                 }
1718                 gro = !!TPA_END_GRO(tpa_end);
1719         }
1720         data = tpa_info->data;
1721         data_ptr = tpa_info->data_ptr;
1722         prefetch(data_ptr);
1723         len = tpa_info->len;
1724         mapping = tpa_info->mapping;
1725
1726         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1727                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1728                 if (agg_bufs > MAX_SKB_FRAGS)
1729                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1730                                     agg_bufs, (int)MAX_SKB_FRAGS);
1731                 return NULL;
1732         }
1733
1734         if (len <= bp->rx_copy_thresh) {
1735                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1736                 if (!skb) {
1737                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1738                         cpr->sw_stats.rx.rx_oom_discards += 1;
1739                         return NULL;
1740                 }
1741         } else {
1742                 u8 *new_data;
1743                 dma_addr_t new_mapping;
1744
1745                 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1746                 if (!new_data) {
1747                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1748                         cpr->sw_stats.rx.rx_oom_discards += 1;
1749                         return NULL;
1750                 }
1751
1752                 tpa_info->data = new_data;
1753                 tpa_info->data_ptr = new_data + bp->rx_offset;
1754                 tpa_info->mapping = new_mapping;
1755
1756                 skb = napi_build_skb(data, bp->rx_buf_size);
1757                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1758                                        bp->rx_buf_use_size, bp->rx_dir,
1759                                        DMA_ATTR_WEAK_ORDERING);
1760
1761                 if (!skb) {
1762                         skb_free_frag(data);
1763                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1764                         cpr->sw_stats.rx.rx_oom_discards += 1;
1765                         return NULL;
1766                 }
1767                 skb_reserve(skb, bp->rx_offset);
1768                 skb_put(skb, len);
1769         }
1770
1771         if (agg_bufs) {
1772                 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1773                 if (!skb) {
1774                         /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1775                         cpr->sw_stats.rx.rx_oom_discards += 1;
1776                         return NULL;
1777                 }
1778         }
1779
1780         if (tpa_info->cfa_code_valid)
1781                 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1782         skb->protocol = eth_type_trans(skb, dev);
1783
1784         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1785                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1786
1787         if (tpa_info->vlan_valid &&
1788             (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1789                 __be16 vlan_proto = htons(tpa_info->metadata >>
1790                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1791                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1792
1793                 if (eth_type_vlan(vlan_proto)) {
1794                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1795                 } else {
1796                         dev_kfree_skb(skb);
1797                         return NULL;
1798                 }
1799         }
1800
1801         skb_checksum_none_assert(skb);
1802         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1803                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1804                 skb->csum_level =
1805                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1806         }
1807
1808         if (gro)
1809                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1810
1811         return skb;
1812 }
1813
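/* Stash a TPA aggregation completion in the TPA slot's array until the
 * matching TPA_END completion arrives.
 */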
1814 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1815                          struct rx_agg_cmp *rx_agg)
1816 {
1817         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1818         struct bnxt_tpa_info *tpa_info;
1819
1820         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1821         tpa_info = &rxr->rx_tpa[agg_id];
1822         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1823         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1824 }
1825
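/* Hand a completed skb to the stack, diverting vf-rep traffic to the
 * representor path.
 */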
1826 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1827                              struct sk_buff *skb)
1828 {
1829         skb_mark_for_recycle(skb);
1830
1831         if (skb->dev != bp->dev) {
1832                 /* this packet belongs to a vf-rep */
1833                 bnxt_vf_rep_rx(bp, skb);
1834                 return;
1835         }
1836         skb_record_rx_queue(skb, bnapi->index);
1837         napi_gro_receive(&bnapi->napi, skb);
1838 }
1839
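/* Return true if the completion carries a usable rx timestamp: either
 * a PTP packet timestamp, or any valid non-zero timestamp when
 * timestamping of all packets is enabled.
 */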
1840 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1841                              struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1842 {
1843         u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1844
1845         if (BNXT_PTP_RX_TS_VALID(flags))
1846                 goto ts_valid;
1847         if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1848                 return false;
1849
1850 ts_valid:
1851         *cmpl_ts = ts;
1852         return true;
1853 }
1854
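/* Extract VLAN tag information from the rx completion (legacy flags2
 * format or the V3 metadata format) and attach it to the skb.  Frees
 * the skb and returns NULL if the TPID is unrecognized.
 */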
1855 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1856                                     struct rx_cmp *rxcmp,
1857                                     struct rx_cmp_ext *rxcmp1)
1858 {
1859         __be16 vlan_proto;
1860         u16 vtag;
1861
1862         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1863                 __le32 flags2 = rxcmp1->rx_cmp_flags2;
1864                 u32 meta_data;
1865
1866                 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1867                         return skb;
1868
1869                 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1870                 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1871                 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1872                 if (eth_type_vlan(vlan_proto))
1873                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1874                 else
1875                         goto vlan_err;
1876         } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1877                 if (RX_CMP_VLAN_VALID(rxcmp)) {
1878                         u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
1879
1880                         if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
1881                                 vlan_proto = htons(ETH_P_8021Q);
1882                         else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
1883                                 vlan_proto = htons(ETH_P_8021AD);
1884                         else
1885                                 goto vlan_err;
1886                         vtag = RX_CMP_METADATA0_TCI(rxcmp1);
1887                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1888                 }
1889         }
1890         return skb;
1891 vlan_err:
1892         dev_kfree_skb(skb);
1893         return NULL;
1894 }
1895
1896 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
1897                                            struct rx_cmp *rxcmp)
1898 {
1899         u8 ext_op;
1900
1901         ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
1902         switch (ext_op) {
1903         case EXT_OP_INNER_4:
1904         case EXT_OP_OUTER_4:
1905         case EXT_OP_INNFL_3:
1906         case EXT_OP_OUTFL_3:
1907                 return PKT_HASH_TYPE_L4;
1908         default:
1909                 return PKT_HASH_TYPE_L3;
1910         }
1911 }
1912
1913 /* returns the following:
1914  * 1       - 1 packet successfully received
1915  * 0       - successful TPA_START, packet not completed yet
1916  * -EBUSY  - completion ring does not have all the agg buffers yet
1917  * -ENOMEM - packet aborted due to out of memory
1918  * -EIO    - packet aborted due to hw error indicated in BD
1919  */
1920 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1921                        u32 *raw_cons, u8 *event)
1922 {
1923         struct bnxt_napi *bnapi = cpr->bnapi;
1924         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1925         struct net_device *dev = bp->dev;
1926         struct rx_cmp *rxcmp;
1927         struct rx_cmp_ext *rxcmp1;
1928         u32 tmp_raw_cons = *raw_cons;
1929         u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1930         struct bnxt_sw_rx_bd *rx_buf;
1931         unsigned int len;
1932         u8 *data_ptr, agg_bufs, cmp_type;
1933         bool xdp_active = false;
1934         dma_addr_t dma_addr;
1935         struct sk_buff *skb;
1936         struct xdp_buff xdp;
1937         u32 flags, misc;
1938         u32 cmpl_ts;
1939         void *data;
1940         int rc = 0;
1941
1942         rxcmp = (struct rx_cmp *)
1943                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1944
1945         cmp_type = RX_CMP_TYPE(rxcmp);
1946
1947         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1948                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1949                 goto next_rx_no_prod_no_len;
1950         }
1951
1952         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1953         cp_cons = RING_CMP(tmp_raw_cons);
1954         rxcmp1 = (struct rx_cmp_ext *)
1955                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1956
1957         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1958                 return -EBUSY;
1959
1960         /* The valid bit of the entry must be checked before reading
1961          * any further.
1962          */
1963         dma_rmb();
1964         prod = rxr->rx_prod;
1965
1966         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
1967             cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
1968                 bnxt_tpa_start(bp, rxr, cmp_type,
1969                                (struct rx_tpa_start_cmp *)rxcmp,
1970                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1971
1972                 *event |= BNXT_RX_EVENT;
1973                 goto next_rx_no_prod_no_len;
1974
1975         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1976                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1977                                    (struct rx_tpa_end_cmp *)rxcmp,
1978                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1979
1980                 if (IS_ERR(skb))
1981                         return -EBUSY;
1982
1983                 rc = -ENOMEM;
1984                 if (likely(skb)) {
1985                         bnxt_deliver_skb(bp, bnapi, skb);
1986                         rc = 1;
1987                 }
1988                 *event |= BNXT_RX_EVENT;
1989                 goto next_rx_no_prod_no_len;
1990         }
1991
1992         cons = rxcmp->rx_cmp_opaque;
1993         if (unlikely(cons != rxr->rx_next_cons)) {
1994                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1995
1996                 /* 0xffff is forced error, don't print it */
1997                 if (rxr->rx_next_cons != 0xffff)
1998                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1999                                     cons, rxr->rx_next_cons);
2000                 bnxt_sched_reset_rxr(bp, rxr);
2001                 if (rc1)
2002                         return rc1;
2003                 goto next_rx_no_prod_no_len;
2004         }
2005         rx_buf = &rxr->rx_buf_ring[cons];
2006         data = rx_buf->data;
2007         data_ptr = rx_buf->data_ptr;
2008         prefetch(data_ptr);
2009
2010         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2011         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2012
2013         if (agg_bufs) {
2014                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2015                         return -EBUSY;
2016
2017                 cp_cons = NEXT_CMP(cp_cons);
2018                 *event |= BNXT_AGG_EVENT;
2019         }
2020         *event |= BNXT_RX_EVENT;
2021
2022         rx_buf->data = NULL;
2023         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2024                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2025
2026                 bnxt_reuse_rx_data(rxr, cons, data);
2027                 if (agg_bufs)
2028                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2029                                                false);
2030
2031                 rc = -EIO;
2032                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2033                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
2034                         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2035                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2036                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2037                                                  rx_err);
2038                                 bnxt_sched_reset_rxr(bp, rxr);
2039                         }
2040                 }
2041                 goto next_rx_no_len;
2042         }
2043
2044         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2045         len = flags >> RX_CMP_LEN_SHIFT;
2046         dma_addr = rx_buf->mapping;
2047
2048         if (bnxt_xdp_attached(bp, rxr)) {
2049                 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2050                 if (agg_bufs) {
2051                         u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2052                                                              cp_cons, agg_bufs,
2053                                                              false);
2054                         if (!frag_len) {
2055                                 cpr->sw_stats.rx.rx_oom_discards += 1;
2056                                 rc = -ENOMEM;
2057                                 goto next_rx;
2058                         }
2059                 }
2060                 xdp_active = true;
2061         }
2062
2063         if (xdp_active) {
2064                 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
2065                         rc = 1;
2066                         goto next_rx;
2067                 }
2068         }
2069
2070         if (len <= bp->rx_copy_thresh) {
2071                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2072                 bnxt_reuse_rx_data(rxr, cons, data);
2073                 if (!skb) {
2074                         if (agg_bufs) {
2075                                 if (!xdp_active)
2076                                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2077                                                                agg_bufs, false);
2078                                 else
2079                                         bnxt_xdp_buff_frags_free(rxr, &xdp);
2080                         }
2081                         cpr->sw_stats.rx.rx_oom_discards += 1;
2082                         rc = -ENOMEM;
2083                         goto next_rx;
2084                 }
2085         } else {
2086                 u32 payload;
2087
2088                 if (rx_buf->data_ptr == data_ptr)
2089                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
2090                 else
2091                         payload = 0;
2092                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2093                                       payload | len);
2094                 if (!skb) {
2095                         cpr->sw_stats.rx.rx_oom_discards += 1;
2096                         rc = -ENOMEM;
2097                         goto next_rx;
2098                 }
2099         }
2100
2101         if (agg_bufs) {
2102                 if (!xdp_active) {
2103                         skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2104                         if (!skb) {
2105                                 cpr->sw_stats.rx.rx_oom_discards += 1;
2106                                 rc = -ENOMEM;
2107                                 goto next_rx;
2108                         }
2109                 } else {
2110                         skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2111                         if (!skb) {
2112                                 /* we should be able to free the old skb here */
2113                                 bnxt_xdp_buff_frags_free(rxr, &xdp);
2114                                 cpr->sw_stats.rx.rx_oom_discards += 1;
2115                                 rc = -ENOMEM;
2116                                 goto next_rx;
2117                         }
2118                 }
2119         }
2120
2121         if (RX_CMP_HASH_VALID(rxcmp)) {
2122                 enum pkt_hash_types type;
2123
2124                 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2125                         type = bnxt_rss_ext_op(bp, rxcmp);
2126                 } else {
2127                         u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
2128
2129                         /* RSS profiles 1 and 3 with extract code 0 for inner
2130                          * 4-tuple
2131                          */
2132                         if (hash_type != 1 && hash_type != 3)
2133                                 type = PKT_HASH_TYPE_L3;
2134                         else
2135                                 type = PKT_HASH_TYPE_L4;
2136                 }
2137                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2138         }
2139
2140         if (cmp_type == CMP_TYPE_RX_L2_CMP)
2141                 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2142         skb->protocol = eth_type_trans(skb, dev);
2143
2144         if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2145                 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2146                 if (!skb)
2147                         goto next_rx;
2148         }
2149
2150         skb_checksum_none_assert(skb);
2151         if (RX_CMP_L4_CS_OK(rxcmp1)) {
2152                 if (dev->features & NETIF_F_RXCSUM) {
2153                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2154                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2155                 }
2156         } else {
2157                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2158                         if (dev->features & NETIF_F_RXCSUM)
2159                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
2160                 }
2161         }
2162
2163         if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2164                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2165                         u64 ns, ts;
2166
2167                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2168                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2169
2170                                 spin_lock_bh(&ptp->ptp_lock);
2171                                 ns = timecounter_cyc2time(&ptp->tc, ts);
2172                                 spin_unlock_bh(&ptp->ptp_lock);
2173                                 memset(skb_hwtstamps(skb), 0,
2174                                        sizeof(*skb_hwtstamps(skb)));
2175                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2176                         }
2177                 }
2178         }
2179         bnxt_deliver_skb(bp, bnapi, skb);
2180         rc = 1;
2181
2182 next_rx:
2183         cpr->rx_packets += 1;
2184         cpr->rx_bytes += len;
2185
2186 next_rx_no_len:
2187         rxr->rx_prod = NEXT_RX(prod);
2188         rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2189
2190 next_rx_no_prod_no_len:
2191         *raw_cons = tmp_raw_cons;
2192
2193         return rc;
2194 }
2195
2196 /* In netpoll mode, if we are using a combined completion ring, we need to
2197  * discard the rx packets and recycle the buffers.
2198  */
2199 static int bnxt_force_rx_discard(struct bnxt *bp,
2200                                  struct bnxt_cp_ring_info *cpr,
2201                                  u32 *raw_cons, u8 *event)
2202 {
2203         u32 tmp_raw_cons = *raw_cons;
2204         struct rx_cmp_ext *rxcmp1;
2205         struct rx_cmp *rxcmp;
2206         u16 cp_cons;
2207         u8 cmp_type;
2208         int rc;
2209
2210         cp_cons = RING_CMP(tmp_raw_cons);
2211         rxcmp = (struct rx_cmp *)
2212                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2213
2214         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2215         cp_cons = RING_CMP(tmp_raw_cons);
2216         rxcmp1 = (struct rx_cmp_ext *)
2217                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2218
2219         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2220                 return -EBUSY;
2221
2222         /* The valid bit of the entry must be checked before reading
2223          * any further.
2224          */
2225         dma_rmb();
2226         cmp_type = RX_CMP_TYPE(rxcmp);
2227         if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2228             cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2229                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2230                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2231         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2232                 struct rx_tpa_end_cmp_ext *tpa_end1;
2233
2234                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2235                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2236                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2237         }
2238         rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2239         if (rc && rc != -EBUSY)
2240                 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2241         return rc;
2242 }
2243
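/* Read one firmware health register.  The register descriptor encodes
 * both the access type (PCI config space, mapped GRC window, or a
 * BAR0/BAR1 offset) and the offset itself.
 */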
2244 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2245 {
2246         struct bnxt_fw_health *fw_health = bp->fw_health;
2247         u32 reg = fw_health->regs[reg_idx];
2248         u32 reg_type, reg_off, val = 0;
2249
2250         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2251         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2252         switch (reg_type) {
2253         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2254                 pci_read_config_dword(bp->pdev, reg_off, &val);
2255                 break;
2256         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2257                 reg_off = fw_health->mapped_regs[reg_idx];
2258                 fallthrough;
2259         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2260                 val = readl(bp->bar0 + reg_off);
2261                 break;
2262         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2263                 val = readl(bp->bar1 + reg_off);
2264                 break;
2265         }
2266         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2267                 val &= fw_health->fw_reset_inprog_reg_mask;
2268         return val;
2269 }
2270
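/* Map a firmware aggregation ring id back to the rx ring group that
 * owns it; returns INVALID_HW_RING_ID if no group matches.
 */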
2271 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2272 {
2273         int i;
2274
2275         for (i = 0; i < bp->rx_nr_rings; i++) {
2276                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2277                 struct bnxt_ring_grp_info *grp_info;
2278
2279                 grp_info = &bp->grp_info[grp_idx];
2280                 if (grp_info->agg_fw_ring_id == ring_id)
2281                         return grp_idx;
2282         }
2283         return INVALID_HW_RING_ID;
2284 }
2285
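/* Return the forced link speed in whichever representation the PHY
 * uses: the speeds2 field on newer PHYs, otherwise the PAM4 or NRZ
 * forced speed depending on the requested signal mode.
 */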
2286 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2287 {
2288         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2289
2290         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2291                 return link_info->force_link_speed2;
2292         if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2293                 return link_info->force_pam4_link_speed;
2294         return link_info->force_link_speed;
2295 }
2296
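/* Copy the forced speed into the requested-speed fields, selecting
 * PAM4 or PAM4-112 signaling for the speeds that require it.
 */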
2297 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2298 {
2299         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2300
2301         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2302                 link_info->req_link_speed = link_info->force_link_speed2;
2303                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2304                 switch (link_info->req_link_speed) {
2305                 case BNXT_LINK_SPEED_50GB_PAM4:
2306                 case BNXT_LINK_SPEED_100GB_PAM4:
2307                 case BNXT_LINK_SPEED_200GB_PAM4:
2308                 case BNXT_LINK_SPEED_400GB_PAM4:
2309                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2310                         break;
2311                 case BNXT_LINK_SPEED_100GB_PAM4_112:
2312                 case BNXT_LINK_SPEED_200GB_PAM4_112:
2313                 case BNXT_LINK_SPEED_400GB_PAM4_112:
2314                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2315                         break;
2316                 default:
2317                         link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2318                 }
2319                 return;
2320         }
2321         link_info->req_link_speed = link_info->force_link_speed;
2322         link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2323         if (link_info->force_pam4_link_speed) {
2324                 link_info->req_link_speed = link_info->force_pam4_link_speed;
2325                 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2326         }
2327 }
2328
2329 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2330 {
2331         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2332
2333         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2334                 link_info->advertising = link_info->auto_link_speeds2;
2335                 return;
2336         }
2337         link_info->advertising = link_info->auto_link_speeds;
2338         link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2339 }
2340
2341 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2342 {
2343         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2344
2345         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2346                 if (link_info->req_link_speed != link_info->force_link_speed2)
2347                         return true;
2348                 return false;
2349         }
2350         if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2351             link_info->req_link_speed != link_info->force_link_speed)
2352                 return true;
2353         if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2354             link_info->req_link_speed != link_info->force_pam4_link_speed)
2355                 return true;
2356         return false;
2357 }
2358
2359 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2360 {
2361         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2362
2363         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2364                 if (link_info->advertising != link_info->auto_link_speeds2)
2365                         return true;
2366                 return false;
2367         }
2368         if (link_info->advertising != link_info->auto_link_speeds ||
2369             link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2370                 return true;
2371         return false;
2372 }
2373
2374 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)                          \
2375         ((data2) &                                                      \
2376           ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2377
2378 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)                        \
2379         (((data2) &                                                     \
2380           ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2381          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2382
2383 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)                       \
2384         ((data1) &                                                      \
2385          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2386
2387 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)             \
2388         (((data1) &                                                     \
2389           ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2390          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2391
2392 /* Return true if the workqueue has to be scheduled */
2393 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2394 {
2395         u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2396
2397         switch (err_type) {
2398         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2399                 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2400                            BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2401                 break;
2402         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2403                 netdev_warn(bp->dev, "Pause Storm detected!\n");
2404                 break;
2405         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2406                 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2407                 break;
2408         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2409                 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2410                 char *threshold_type;
2411                 bool notify = false;
2412                 char *dir_str;
2413
2414                 switch (type) {
2415                 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2416                         threshold_type = "warning";
2417                         break;
2418                 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2419                         threshold_type = "critical";
2420                         break;
2421                 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2422                         threshold_type = "fatal";
2423                         break;
2424                 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2425                         threshold_type = "shutdown";
2426                         break;
2427                 default:
2428                         netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2429                         return false;
2430                 }
2431                 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2432                         dir_str = "above";
2433                         notify = true;
2434                 } else {
2435                         dir_str = "below";
2436                 }
2437                 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2438                             dir_str, threshold_type);
2439                 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2440                             BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2441                             BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2442                 if (notify) {
2443                         bp->thermal_threshold_type = type;
2444                         set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2445                         return true;
2446                 }
2447                 return false;
2448         }
2449         default:
2450                 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2451                            err_type);
2452                 break;
2453         }
2454         return false;
2455 }
2456
2457 #define BNXT_GET_EVENT_PORT(data)       \
2458         ((data) &                       \
2459          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2460
2461 #define BNXT_EVENT_RING_TYPE(data2)     \
2462         ((data2) &                      \
2463          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2464
2465 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2466         (BNXT_EVENT_RING_TYPE(data2) == \
2467          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2468
2469 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)        \
2470         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2471          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2472
2473 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)        \
2474         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2475          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2476
2477 #define BNXT_PHC_BITS   48
2478
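/* Dispatch one firmware async event completion: decode the event id
 * and data words, set the matching sp_event bit(s) for the service
 * task, or handle purely informational events inline.
 */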
2479 static int bnxt_async_event_process(struct bnxt *bp,
2480                                     struct hwrm_async_event_cmpl *cmpl)
2481 {
2482         u16 event_id = le16_to_cpu(cmpl->event_id);
2483         u32 data1 = le32_to_cpu(cmpl->event_data1);
2484         u32 data2 = le32_to_cpu(cmpl->event_data2);
2485
2486         netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2487                    event_id, data1, data2);
2488
2489         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2490         switch (event_id) {
2491         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2492                 struct bnxt_link_info *link_info = &bp->link_info;
2493
2494                 if (BNXT_VF(bp))
2495                         goto async_event_process_exit;
2496
2497                 /* print unsupported speed warning in forced speed mode only */
2498                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2499                     (data1 & 0x20000)) {
2500                         u16 fw_speed = bnxt_get_force_speed(link_info);
2501                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2502
2503                         if (speed != SPEED_UNKNOWN)
2504                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2505                                             speed);
2506                 }
2507                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2508         }
2509                 fallthrough;
2510         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2511         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2512                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2513                 fallthrough;
2514         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2515                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2516                 break;
2517         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2518                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2519                 break;
2520         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2521                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2522
2523                 if (BNXT_VF(bp))
2524                         break;
2525
2526                 if (bp->pf.port_id != port_id)
2527                         break;
2528
2529                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2530                 break;
2531         }
2532         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2533                 if (BNXT_PF(bp))
2534                         goto async_event_process_exit;
2535                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2536                 break;
2537         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2538                 char *type_str = "Solicited";
2539
2540                 if (!bp->fw_health)
2541                         goto async_event_process_exit;
2542
2543                 bp->fw_reset_timestamp = jiffies;
2544                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2545                 if (!bp->fw_reset_min_dsecs)
2546                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2547                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2548                 if (!bp->fw_reset_max_dsecs)
2549                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2550                 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2551                         set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2552                 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2553                         type_str = "Fatal";
2554                         bp->fw_health->fatalities++;
2555                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2556                 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2557                            EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2558                         type_str = "Non-fatal";
2559                         bp->fw_health->survivals++;
2560                         set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2561                 }
2562                 netif_warn(bp, hw, bp->dev,
2563                            "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2564                            type_str, data1, data2,
2565                            bp->fw_reset_min_dsecs * 100,
2566                            bp->fw_reset_max_dsecs * 100);
2567                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2568                 break;
2569         }
2570         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2571                 struct bnxt_fw_health *fw_health = bp->fw_health;
2572                 char *status_desc = "healthy";
2573                 u32 status;
2574
2575                 if (!fw_health)
2576                         goto async_event_process_exit;
2577
2578                 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2579                         fw_health->enabled = false;
2580                         netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2581                         break;
2582                 }
2583                 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2584                 fw_health->tmr_multiplier =
2585                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2586                                      bp->current_interval * 10);
2587                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2588                 if (!fw_health->enabled)
2589                         fw_health->last_fw_heartbeat =
2590                                 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2591                 fw_health->last_fw_reset_cnt =
2592                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2593                 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2594                 if (status != BNXT_FW_STATUS_HEALTHY)
2595                         status_desc = "unhealthy";
2596                 netif_info(bp, drv, bp->dev,
2597                            "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2598                            fw_health->primary ? "primary" : "backup", status,
2599                            status_desc, fw_health->last_fw_reset_cnt);
2600                 if (!fw_health->enabled) {
2601                         /* Make sure tmr_counter is set and visible to
2602                          * bnxt_health_check() before setting enabled to true.
2603                          */
2604                         smp_wmb();
2605                         fw_health->enabled = true;
2606                 }
2607                 goto async_event_process_exit;
2608         }
2609         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2610                 netif_notice(bp, hw, bp->dev,
2611                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2612                              data1, data2);
2613                 goto async_event_process_exit;
2614         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2615                 struct bnxt_rx_ring_info *rxr;
2616                 u16 grp_idx;
2617
2618                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2619                         goto async_event_process_exit;
2620
2621                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2622                             BNXT_EVENT_RING_TYPE(data2), data1);
2623                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2624                         goto async_event_process_exit;
2625
2626                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2627                 if (grp_idx == INVALID_HW_RING_ID) {
2628                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2629                                     data1);
2630                         goto async_event_process_exit;
2631                 }
2632                 rxr = bp->bnapi[grp_idx]->rx_ring;
2633                 bnxt_sched_reset_rxr(bp, rxr);
2634                 goto async_event_process_exit;
2635         }
2636         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2637                 struct bnxt_fw_health *fw_health = bp->fw_health;
2638
2639                 netif_notice(bp, hw, bp->dev,
2640                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2641                              data1, data2);
2642                 if (fw_health) {
2643                         fw_health->echo_req_data1 = data1;
2644                         fw_health->echo_req_data2 = data2;
2645                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2646                         break;
2647                 }
2648                 goto async_event_process_exit;
2649         }
2650         case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2651                 bnxt_ptp_pps_event(bp, data1, data2);
2652                 goto async_event_process_exit;
2653         }
2654         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2655                 if (bnxt_event_error_report(bp, data1, data2))
2656                         break;
2657                 goto async_event_process_exit;
2658         }
2659         case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2660                 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2661                 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2662                         if (BNXT_PTP_USE_RTC(bp)) {
2663                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2664                                 u64 ns;
2665
2666                                 if (!ptp)
2667                                         goto async_event_process_exit;
2668
2669                                 spin_lock_bh(&ptp->ptp_lock);
2670                                 bnxt_ptp_update_current_time(bp);
2671                                 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2672                                        BNXT_PHC_BITS) | ptp->current_time);
2673                                 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2674                                 spin_unlock_bh(&ptp->ptp_lock);
2675                         }
2676                         break;
2677                 }
2678                 goto async_event_process_exit;
2679         }
2680         case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2681                 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2682
2683                 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2684                 goto async_event_process_exit;
2685         }
2686         default:
2687                 goto async_event_process_exit;
2688         }
2689         __bnxt_queue_sp_work(bp);
2690 async_event_process_exit:
2691         return 0;
2692 }
2693
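/* Dispatch HWRM-related completions: HWRM_DONE completes the waiting
 * command, forwarded VF requests are flagged for PF processing, and
 * async events are handed to bnxt_async_event_process().
 */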
2694 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2695 {
2696         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2697         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2698         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2699                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2700
2701         switch (cmpl_type) {
2702         case CMPL_BASE_TYPE_HWRM_DONE:
2703                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2704                 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2705                 break;
2706
2707         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2708                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2709
2710                 if ((vf_id < bp->pf.first_vf_id) ||
2711                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2712                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2713                                    vf_id);
2714                         return -EINVAL;
2715                 }
2716
2717                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2718                 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2719                 break;
2720
2721         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2722                 bnxt_async_event_process(bp,
2723                                          (struct hwrm_async_event_cmpl *)txcmp);
2724                 break;
2725
2726         default:
2727                 break;
2728         }
2729
2730         return 0;
2731 }
2732
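/* MSI-X interrupt handler: one vector per completion ring.  Count the
 * event, prefetch the next completion descriptor and hand the ring
 * over to NAPI.
 */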
2733 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2734 {
2735         struct bnxt_napi *bnapi = dev_instance;
2736         struct bnxt *bp = bnapi->bp;
2737         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2738         u32 cons = RING_CMP(cpr->cp_raw_cons);
2739
2740         cpr->event_ctr++;
2741         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2742         napi_schedule(&bnapi->napi);
2743         return IRQ_HANDLED;
2744 }
2745
2746 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2747 {
2748         u32 raw_cons = cpr->cp_raw_cons;
2749         u16 cons = RING_CMP(raw_cons);
2750         struct tx_cmp *txcmp;
2751
2752         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2753
2754         return TX_CMP_VALID(txcmp, raw_cons);
2755 }
2756
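/* Legacy INTx interrupt handler.  The line may be shared: if the ring
 * shows no pending work, check this ring's interrupt status bit before
 * claiming the interrupt, then mask the ring IRQ and schedule NAPI.
 */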
2757 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2758 {
2759         struct bnxt_napi *bnapi = dev_instance;
2760         struct bnxt *bp = bnapi->bp;
2761         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2762         u32 cons = RING_CMP(cpr->cp_raw_cons);
2763         u32 int_status;
2764
2765         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2766
2767         if (!bnxt_has_work(bp, cpr)) {
2768                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2769                 /* return if spurious interrupt: this ring's status bit is not set */
2770                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2771                         return IRQ_NONE;
2772         }
2773
2774         /* disable ring IRQ */
2775         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2776
2777         /* Return here if interrupt is shared and is disabled. */
2778         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2779                 return IRQ_HANDLED;
2780
2781         napi_schedule(&bnapi->napi);
2782         return IRQ_HANDLED;
2783 }
2784
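/* Core completion ring poll loop.  Consume valid entries, advancing
 * TX consumer indices, processing RX packets (or discarding them when
 * called with zero budget) and handling HWRM completions, until the
 * budget is exhausted or the ring is empty.  Returns the number of RX
 * packets processed; the completion ring doorbell is written by the
 * caller.
 */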
2785 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2786                             int budget)
2787 {
2788         struct bnxt_napi *bnapi = cpr->bnapi;
2789         u32 raw_cons = cpr->cp_raw_cons;
2790         u32 cons;
2791         int rx_pkts = 0;
2792         u8 event = 0;
2793         struct tx_cmp *txcmp;
2794
2795         cpr->has_more_work = 0;
2796         cpr->had_work_done = 1;
2797         while (1) {
2798                 u8 cmp_type;
2799                 int rc;
2800
2801                 cons = RING_CMP(raw_cons);
2802                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2803
2804                 if (!TX_CMP_VALID(txcmp, raw_cons))
2805                         break;
2806
2807                 /* The valid test of the entry must be done before
2808                  * reading any further.
2809                  */
2810                 dma_rmb();
2811                 cmp_type = TX_CMP_TYPE(txcmp);
2812                 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2813                     cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2814                         u32 opaque = txcmp->tx_cmp_opaque;
2815                         struct bnxt_tx_ring_info *txr;
2816                         u16 tx_freed;
2817
2818                         txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2819                         event |= BNXT_TX_CMP_EVENT;
2820                         if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2821                                 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2822                         else
2823                                 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2824                         tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2825                                    bp->tx_ring_mask;
2826                         /* return full budget so NAPI will complete. */
2827                         if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2828                                 rx_pkts = budget;
2829                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2830                                 if (budget)
2831                                         cpr->has_more_work = 1;
2832                                 break;
2833                         }
2834                 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
2835                            cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2836                         if (likely(budget))
2837                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2838                         else
2839                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2840                                                            &event);
2841                         if (likely(rc >= 0))
2842                                 rx_pkts += rc;
2843                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2844                          * the NAPI budget.  Otherwise, we may potentially loop
2845                          * here forever if we consistently cannot allocate
2846                          * buffers.
2847                          */
2848                         else if (rc == -ENOMEM && budget)
2849                                 rx_pkts++;
2850                         else if (rc == -EBUSY)  /* partial completion */
2851                                 break;
2852                 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
2853                                     cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
2854                                     cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
2855                         bnxt_hwrm_handler(bp, txcmp);
2856                 }
2857                 raw_cons = NEXT_RAW_CMP(raw_cons);
2858
2859                 if (rx_pkts && rx_pkts == budget) {
2860                         cpr->has_more_work = 1;
2861                         break;
2862                 }
2863         }
2864
2865         if (event & BNXT_REDIRECT_EVENT)
2866                 xdp_do_flush();
2867
2868         if (event & BNXT_TX_EVENT) {
2869                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
2870                 u16 prod = txr->tx_prod;
2871
2872                 /* Sync BD data before updating doorbell */
2873                 wmb();
2874
2875                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2876         }
2877
2878         cpr->cp_raw_cons = raw_cons;
2879         bnapi->events |= event;
2880         return rx_pkts;
2881 }
2882
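/* Finish deferred ring updates after polling: run the TX completion
 * handler (unless the TX ring is faulted) and ring the RX and agg
 * producer doorbells.  Only BNXT_TX_CMP_EVENT is left set so any
 * remaining TX completions are reaped on the next poll.
 */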
2883 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2884                                   int budget)
2885 {
2886         if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
2887                 bnapi->tx_int(bp, bnapi, budget);
2888
2889         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2890                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2891
2892                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2893         }
2894         if (bnapi->events & BNXT_AGG_EVENT) {
2895                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2896
2897                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2898         }
2899         bnapi->events &= BNXT_TX_CMP_EVENT;
2900 }
2901
2902 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2903                           int budget)
2904 {
2905         struct bnxt_napi *bnapi = cpr->bnapi;
2906         int rx_pkts;
2907
2908         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2909
2910         /* ACK completion ring before freeing tx ring and producing new
2911          * buffers in rx/agg rings to prevent overflowing the completion
2912          * ring.
2913          */
2914         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2915
2916         __bnxt_poll_work_done(bp, bnapi, budget);
2917         return rx_pkts;
2918 }
2919
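/* NAPI poll for the special Nitro A0 ring.  RX completions seen here
 * are marked with a CRC error so the normal RX path drops and recycles
 * the buffers.
 */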
2920 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2921 {
2922         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2923         struct bnxt *bp = bnapi->bp;
2924         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2925         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2926         struct tx_cmp *txcmp;
2927         struct rx_cmp_ext *rxcmp1;
2928         u32 cp_cons, tmp_raw_cons;
2929         u32 raw_cons = cpr->cp_raw_cons;
2930         bool flush_xdp = false;
2931         u32 rx_pkts = 0;
2932         u8 event = 0;
2933
2934         while (1) {
2935                 int rc;
2936
2937                 cp_cons = RING_CMP(raw_cons);
2938                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2939
2940                 if (!TX_CMP_VALID(txcmp, raw_cons))
2941                         break;
2942
2943                 /* The valid test of the entry must be done before
2944                  * reading any further.
2945                  */
2946                 dma_rmb();
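                /* (type & 0x30) == 0x10 matches the RX completion
                 * types, which all fall in the 0x10 - 0x1f range.
                 */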
2947                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2948                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2949                         cp_cons = RING_CMP(tmp_raw_cons);
2950                         rxcmp1 = (struct rx_cmp_ext *)
2951                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2952
2953                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2954                                 break;
2955
2956                         /* force an error to recycle the buffer */
2957                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2958                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2959
2960                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2961                         if (likely(rc == -EIO) && budget)
2962                                 rx_pkts++;
2963                         else if (rc == -EBUSY)  /* partial completion */
2964                                 break;
2965                         if (event & BNXT_REDIRECT_EVENT)
2966                                 flush_xdp = true;
2967                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2968                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2969                         bnxt_hwrm_handler(bp, txcmp);
2970                 } else {
2971                         netdev_err(bp->dev,
2972                                    "Invalid completion received on special ring\n");
2973                 }
2974                 raw_cons = NEXT_RAW_CMP(raw_cons);
2975
2976                 if (rx_pkts == budget)
2977                         break;
2978         }
2979
2980         cpr->cp_raw_cons = raw_cons;
2981         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2982         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2983
2984         if (event & BNXT_AGG_EVENT)
2985                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2986         if (flush_xdp)
2987                 xdp_do_flush();
2988
2989         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2990                 napi_complete_done(napi, rx_pkts);
2991                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2992         }
2993         return rx_pkts;
2994 }
2995
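/* Main NAPI poll for chips that use a single completion ring per
 * vector.  Poll until the budget is exhausted or no work remains,
 * re-arm the ring on completion, and feed dynamic interrupt
 * moderation (DIM) if enabled.
 */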
2996 static int bnxt_poll(struct napi_struct *napi, int budget)
2997 {
2998         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2999         struct bnxt *bp = bnapi->bp;
3000         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3001         int work_done = 0;
3002
3003         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3004                 napi_complete(napi);
3005                 return 0;
3006         }
3007         while (1) {
3008                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3009
3010                 if (work_done >= budget) {
3011                         if (!budget)
3012                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3013                         break;
3014                 }
3015
3016                 if (!bnxt_has_work(bp, cpr)) {
3017                         if (napi_complete_done(napi, work_done))
3018                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3019                         break;
3020                 }
3021         }
3022         if (bp->flags & BNXT_FLAG_DIM) {
3023                 struct dim_sample dim_sample = {};
3024
3025                 dim_update_sample(cpr->event_ctr,
3026                                   cpr->rx_packets,
3027                                   cpr->rx_bytes,
3028                                   &dim_sample);
3029                 net_dim(&cpr->dim, dim_sample);
3030         }
3031         return work_done;
3032 }
3033
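/* Poll every sub completion ring that has a pending NQ notification,
 * splitting the remaining budget across them.
 */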
3034 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3035 {
3036         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3037         int i, work_done = 0;
3038
3039         for (i = 0; i < cpr->cp_ring_count; i++) {
3040                 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3041
3042                 if (cpr2->had_nqe_notify) {
3043                         work_done += __bnxt_poll_work(bp, cpr2,
3044                                                       budget - work_done);
3045                         cpr->has_more_work |= cpr2->has_more_work;
3046                 }
3047         }
3048         return work_done;
3049 }
3050
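/* Write back the consumer index of each sub ring that did work, using
 * dbr_type (DBR_TYPE_CQ or DBR_TYPE_CQ_ARMALL) to optionally re-arm
 * the ring, then finish the deferred TX/RX doorbell updates.
 */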
3051 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3052                                  u64 dbr_type, int budget)
3053 {
3054         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3055         int i;
3056
3057         for (i = 0; i < cpr->cp_ring_count; i++) {
3058                 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3059                 struct bnxt_db_info *db;
3060
3061                 if (cpr2->had_work_done) {
3062                         u32 tgl = 0;
3063
3064                         if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3065                                 cpr2->had_nqe_notify = 0;
3066                                 tgl = cpr2->toggle;
3067                         }
3068                         db = &cpr2->cp_db;
3069                         bnxt_writeq(bp,
3070                                     db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3071                                     DB_RING_IDX(db, cpr2->cp_raw_cons),
3072                                     db->doorbell);
3073                         cpr2->had_work_done = 0;
3074                 }
3075         }
3076         __bnxt_poll_work_done(bp, bnapi, budget);
3077 }
3078
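/* NAPI poll for P5 and newer chips, which use a notification queue
 * (NQ) per vector.  Drain NQ entries, dispatching CQ notifications to
 * the corresponding RX/TX sub completion rings; re-arm everything only
 * when all rings have been fully polled.
 */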
3079 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3080 {
3081         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3082         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3083         struct bnxt_cp_ring_info *cpr_rx;
3084         u32 raw_cons = cpr->cp_raw_cons;
3085         struct bnxt *bp = bnapi->bp;
3086         struct nqe_cn *nqcmp;
3087         int work_done = 0;
3088         u32 cons;
3089
3090         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3091                 napi_complete(napi);
3092                 return 0;
3093         }
3094         if (cpr->has_more_work) {
3095                 cpr->has_more_work = 0;
3096                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3097         }
3098         while (1) {
3099                 u16 type;
3100
3101                 cons = RING_CMP(raw_cons);
3102                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3103
3104                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3105                         if (cpr->has_more_work)
3106                                 break;
3107
3108                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3109                                              budget);
3110                         cpr->cp_raw_cons = raw_cons;
3111                         if (napi_complete_done(napi, work_done))
3112                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3113                                                   cpr->cp_raw_cons);
3114                         goto poll_done;
3115                 }
3116
3117                 /* The valid test of the entry must be done before
3118                  * reading any further.
3119                  */
3120                 dma_rmb();
3121
3122                 type = le16_to_cpu(nqcmp->type);
3123                 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3124                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3125                         u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3126                         struct bnxt_cp_ring_info *cpr2;
3127
3128                         /* No more budget for RX work */
3129                         if (budget && work_done >= budget &&
3130                             cq_type == BNXT_NQ_HDL_TYPE_RX)
3131                                 break;
3132
3133                         idx = BNXT_NQ_HDL_IDX(idx);
3134                         cpr2 = &cpr->cp_ring_arr[idx];
3135                         cpr2->had_nqe_notify = 1;
3136                         cpr2->toggle = NQE_CN_TOGGLE(type);
3137                         work_done += __bnxt_poll_work(bp, cpr2,
3138                                                       budget - work_done);
3139                         cpr->has_more_work |= cpr2->has_more_work;
3140                 } else {
3141                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3142                 }
3143                 raw_cons = NEXT_RAW_CMP(raw_cons);
3144         }
3145         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3146         if (raw_cons != cpr->cp_raw_cons) {
3147                 cpr->cp_raw_cons = raw_cons;
3148                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3149         }
3150 poll_done:
3151         cpr_rx = &cpr->cp_ring_arr[0];
3152         if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3153             (bp->flags & BNXT_FLAG_DIM)) {
3154                 struct dim_sample dim_sample = {};
3155
3156                 dim_update_sample(cpr->event_ctr,
3157                                   cpr_rx->rx_packets,
3158                                   cpr_rx->rx_bytes,
3159                                   &dim_sample);
3160                 net_dim(&cpr->dim, dim_sample);
3161         }
3162         return work_done;
3163 }
3164
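/* Free all pending TX buffers: XDP_REDIRECT frames, push-mode skbs
 * (which have no DMA mapping) and regular skbs with their head and
 * fragment mappings, then reset the BQL state of each queue.
 */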
3165 static void bnxt_free_tx_skbs(struct bnxt *bp)
3166 {
3167         int i, max_idx;
3168         struct pci_dev *pdev = bp->pdev;
3169
3170         if (!bp->tx_ring)
3171                 return;
3172
3173         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3174         for (i = 0; i < bp->tx_nr_rings; i++) {
3175                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3176                 int j;
3177
3178                 if (!txr->tx_buf_ring)
3179                         continue;
3180
3181                 for (j = 0; j < max_idx;) {
3182                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3183                         struct sk_buff *skb;
3184                         int k, last;
3185
3186                         if (i < bp->tx_nr_rings_xdp &&
3187                             tx_buf->action == XDP_REDIRECT) {
3188                                 dma_unmap_single(&pdev->dev,
3189                                         dma_unmap_addr(tx_buf, mapping),
3190                                         dma_unmap_len(tx_buf, len),
3191                                         DMA_TO_DEVICE);
3192                                 xdp_return_frame(tx_buf->xdpf);
3193                                 tx_buf->action = 0;
3194                                 tx_buf->xdpf = NULL;
3195                                 j++;
3196                                 continue;
3197                         }
3198
3199                         skb = tx_buf->skb;
3200                         if (!skb) {
3201                                 j++;
3202                                 continue;
3203                         }
3204
3205                         tx_buf->skb = NULL;
3206
3207                         if (tx_buf->is_push) {
3208                                 dev_kfree_skb(skb);
3209                                 j += 2;
3210                                 continue;
3211                         }
3212
3213                         dma_unmap_single(&pdev->dev,
3214                                          dma_unmap_addr(tx_buf, mapping),
3215                                          skb_headlen(skb),
3216                                          DMA_TO_DEVICE);
3217
3218                         last = tx_buf->nr_frags;
3219                         j += 2;
3220                         for (k = 0; k < last; k++, j++) {
3221                                 int ring_idx = j & bp->tx_ring_mask;
3222                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3223
3224                                 tx_buf = &txr->tx_buf_ring[ring_idx];
3225                                 dma_unmap_page(
3226                                         &pdev->dev,
3227                                         dma_unmap_addr(tx_buf, mapping),
3228                                         skb_frag_size(frag), DMA_TO_DEVICE);
3229                         }
3230                         dev_kfree_skb(skb);
3231                 }
3232                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3233         }
3234 }
3235
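/* Release every buffer posted to one RX ring: TPA buffers, regular RX
 * buffers (recycled to the page pool in page mode) and aggregation
 * pages, then clear the TPA aggregation ID bitmap.
 */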
3236 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
3237 {
3238         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3239         struct pci_dev *pdev = bp->pdev;
3240         struct bnxt_tpa_idx_map *map;
3241         int i, max_idx, max_agg_idx;
3242
3243         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3244         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3245         if (!rxr->rx_tpa)
3246                 goto skip_rx_tpa_free;
3247
3248         for (i = 0; i < bp->max_tpa; i++) {
3249                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3250                 u8 *data = tpa_info->data;
3251
3252                 if (!data)
3253                         continue;
3254
3255                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
3256                                        bp->rx_buf_use_size, bp->rx_dir,
3257                                        DMA_ATTR_WEAK_ORDERING);
3258
3259                 tpa_info->data = NULL;
3260
3261                 skb_free_frag(data);
3262         }
3263
3264 skip_rx_tpa_free:
3265         if (!rxr->rx_buf_ring)
3266                 goto skip_rx_buf_free;
3267
3268         for (i = 0; i < max_idx; i++) {
3269                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3270                 dma_addr_t mapping = rx_buf->mapping;
3271                 void *data = rx_buf->data;
3272
3273                 if (!data)
3274                         continue;
3275
3276                 rx_buf->data = NULL;
3277                 if (BNXT_RX_PAGE_MODE(bp)) {
3278                         page_pool_recycle_direct(rxr->page_pool, data);
3279                 } else {
3280                         dma_unmap_single_attrs(&pdev->dev, mapping,
3281                                                bp->rx_buf_use_size, bp->rx_dir,
3282                                                DMA_ATTR_WEAK_ORDERING);
3283                         skb_free_frag(data);
3284                 }
3285         }
3286
3287 skip_rx_buf_free:
3288         if (!rxr->rx_agg_ring)
3289                 goto skip_rx_agg_free;
3290
3291         for (i = 0; i < max_agg_idx; i++) {
3292                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3293                 struct page *page = rx_agg_buf->page;
3294
3295                 if (!page)
3296                         continue;
3297
3298                 rx_agg_buf->page = NULL;
3299                 __clear_bit(i, rxr->rx_agg_bmap);
3300
3301                 page_pool_recycle_direct(rxr->page_pool, page);
3302         }
3303
3304 skip_rx_agg_free:
3305         map = rxr->rx_tpa_idx_map;
3306         if (map)
3307                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3308 }
3309
3310 static void bnxt_free_rx_skbs(struct bnxt *bp)
3311 {
3312         int i;
3313
3314         if (!bp->rx_ring)
3315                 return;
3316
3317         for (i = 0; i < bp->rx_nr_rings; i++)
3318                 bnxt_free_one_rx_ring_skbs(bp, i);
3319 }
3320
3321 static void bnxt_free_skbs(struct bnxt *bp)
3322 {
3323         bnxt_free_tx_skbs(bp);
3324         bnxt_free_rx_skbs(bp);
3325 }
3326
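/* Pre-initialize a block of firmware context memory with the value the
 * firmware expects: either memset the whole block, or write the init
 * value at the designated offset of every entry.
 */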
3327 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3328 {
3329         u8 init_val = ctxm->init_value;
3330         u16 offset = ctxm->init_offset;
3331         u8 *p2 = p;
3332         int i;
3333
3334         if (!init_val)
3335                 return;
3336         if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3337                 memset(p, init_val, len);
3338                 return;
3339         }
3340         for (i = 0; i < len; i += ctxm->entry_size)
3341                 *(p2 + i + offset) = init_val;
3342 }
3343
3344 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3345 {
3346         struct pci_dev *pdev = bp->pdev;
3347         int i;
3348
3349         if (!rmem->pg_arr)
3350                 goto skip_pages;
3351
3352         for (i = 0; i < rmem->nr_pages; i++) {
3353                 if (!rmem->pg_arr[i])
3354                         continue;
3355
3356                 dma_free_coherent(&pdev->dev, rmem->page_size,
3357                                   rmem->pg_arr[i], rmem->dma_arr[i]);
3358
3359                 rmem->pg_arr[i] = NULL;
3360         }
3361 skip_pages:
3362         if (rmem->pg_tbl) {
3363                 size_t pg_tbl_size = rmem->nr_pages * 8;
3364
3365                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3366                         pg_tbl_size = rmem->page_size;
3367                 dma_free_coherent(&pdev->dev, pg_tbl_size,
3368                                   rmem->pg_tbl, rmem->pg_tbl_map);
3369                 rmem->pg_tbl = NULL;
3370         }
3371         if (rmem->vmem_size && *rmem->vmem) {
3372                 vfree(*rmem->vmem);
3373                 *rmem->vmem = NULL;
3374         }
3375 }
3376
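/* Allocate the DMA pages described by rmem.  Multi-page or indirect
 * rings also get a page table whose PTEs carry valid/last bits when
 * required, and rings with software state get their vmem allocated.
 */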
3377 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3378 {
3379         struct pci_dev *pdev = bp->pdev;
3380         u64 valid_bit = 0;
3381         int i;
3382
3383         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3384                 valid_bit = PTU_PTE_VALID;
3385         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3386                 size_t pg_tbl_size = rmem->nr_pages * 8;
3387
3388                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3389                         pg_tbl_size = rmem->page_size;
3390                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3391                                                   &rmem->pg_tbl_map,
3392                                                   GFP_KERNEL);
3393                 if (!rmem->pg_tbl)
3394                         return -ENOMEM;
3395         }
3396
3397         for (i = 0; i < rmem->nr_pages; i++) {
3398                 u64 extra_bits = valid_bit;
3399
3400                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3401                                                      rmem->page_size,
3402                                                      &rmem->dma_arr[i],
3403                                                      GFP_KERNEL);
3404                 if (!rmem->pg_arr[i])
3405                         return -ENOMEM;
3406
3407                 if (rmem->ctx_mem)
3408                         bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3409                                           rmem->page_size);
3410                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3411                         if (i == rmem->nr_pages - 2 &&
3412                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3413                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3414                         else if (i == rmem->nr_pages - 1 &&
3415                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3416                                 extra_bits |= PTU_PTE_LAST;
3417                         rmem->pg_tbl[i] =
3418                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3419                 }
3420         }
3421
3422         if (rmem->vmem_size) {
3423                 *rmem->vmem = vzalloc(rmem->vmem_size);
3424                 if (!(*rmem->vmem))
3425                         return -ENOMEM;
3426         }
3427         return 0;
3428 }
3429
3430 static void bnxt_free_tpa_info(struct bnxt *bp)
3431 {
3432         int i, j;
3433
3434         for (i = 0; i < bp->rx_nr_rings; i++) {
3435                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3436
3437                 kfree(rxr->rx_tpa_idx_map);
3438                 rxr->rx_tpa_idx_map = NULL;
3439                 if (rxr->rx_tpa) {
3440                         for (j = 0; j < bp->max_tpa; j++) {
3441                                 kfree(rxr->rx_tpa[j].agg_arr);
3442                                 rxr->rx_tpa[j].agg_arr = NULL;
3443                         }
3444                 }
3445                 kfree(rxr->rx_tpa);
3446                 rxr->rx_tpa = NULL;
3447         }
3448 }
3449
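/* Allocate per-ring TPA (hardware GRO/LRO) state.  P5+ chips also need
 * a per-TPA aggregation completion array and an agg ID map, sized from
 * the firmware-reported TPA limit.
 */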
3450 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3451 {
3452         int i, j;
3453
3454         bp->max_tpa = MAX_TPA;
3455         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3456                 if (!bp->max_tpa_v2)
3457                         return 0;
3458                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3459         }
3460
3461         for (i = 0; i < bp->rx_nr_rings; i++) {
3462                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3463                 struct rx_agg_cmp *agg;
3464
3465                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3466                                       GFP_KERNEL);
3467                 if (!rxr->rx_tpa)
3468                         return -ENOMEM;
3469
3470                 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3471                         continue;
3472                 for (j = 0; j < bp->max_tpa; j++) {
3473                         agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3474                         if (!agg)
3475                                 return -ENOMEM;
3476                         rxr->rx_tpa[j].agg_arr = agg;
3477                 }
3478                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3479                                               GFP_KERNEL);
3480                 if (!rxr->rx_tpa_idx_map)
3481                         return -ENOMEM;
3482         }
3483         return 0;
3484 }
3485
3486 static void bnxt_free_rx_rings(struct bnxt *bp)
3487 {
3488         int i;
3489
3490         if (!bp->rx_ring)
3491                 return;
3492
3493         bnxt_free_tpa_info(bp);
3494         for (i = 0; i < bp->rx_nr_rings; i++) {
3495                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3496                 struct bnxt_ring_struct *ring;
3497
3498                 if (rxr->xdp_prog)
3499                         bpf_prog_put(rxr->xdp_prog);
3500
3501                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3502                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3503
3504                 page_pool_destroy(rxr->page_pool);
3505                 rxr->page_pool = NULL;
3506
3507                 kfree(rxr->rx_agg_bmap);
3508                 rxr->rx_agg_bmap = NULL;
3509
3510                 ring = &rxr->rx_ring_struct;
3511                 bnxt_free_ring(bp, &ring->ring_mem);
3512
3513                 ring = &rxr->rx_agg_ring_struct;
3514                 bnxt_free_ring(bp, &ring->ring_mem);
3515         }
3516 }
3517
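/* Create the page_pool backing an RX ring.  The pool is sized for the
 * aggregation ring (plus the RX ring in page mode) and handles DMA
 * mapping and device sync on behalf of the driver.
 */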
3518 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3519                                    struct bnxt_rx_ring_info *rxr)
3520 {
3521         struct page_pool_params pp = { 0 };
3522
3523         pp.pool_size = bp->rx_agg_ring_size;
3524         if (BNXT_RX_PAGE_MODE(bp))
3525                 pp.pool_size += bp->rx_ring_size;
3526         pp.nid = dev_to_node(&bp->pdev->dev);
3527         pp.napi = &rxr->bnapi->napi;
3528         pp.netdev = bp->dev;
3529         pp.dev = &bp->pdev->dev;
3530         pp.dma_dir = bp->rx_dir;
3531         pp.max_len = PAGE_SIZE;
3532         pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3533
3534         rxr->page_pool = page_pool_create(&pp);
3535         if (IS_ERR(rxr->page_pool)) {
3536                 int err = PTR_ERR(rxr->page_pool);
3537
3538                 rxr->page_pool = NULL;
3539                 return err;
3540         }
3541         return 0;
3542 }
3543
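/* Allocate RX ring resources: the page pool, XDP rxq info
 * registration, descriptor memory and, with aggregation rings, the agg
 * descriptor memory and free-slot bitmap; TPA state is allocated last.
 */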
3544 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3545 {
3546         int i, rc = 0, agg_rings = 0;
3547
3548         if (!bp->rx_ring)
3549                 return -ENOMEM;
3550
3551         if (bp->flags & BNXT_FLAG_AGG_RINGS)
3552                 agg_rings = 1;
3553
3554         for (i = 0; i < bp->rx_nr_rings; i++) {
3555                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3556                 struct bnxt_ring_struct *ring;
3557
3558                 ring = &rxr->rx_ring_struct;
3559
3560                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3561                 if (rc)
3562                         return rc;
3563
3564                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3565                 if (rc < 0)
3566                         return rc;
3567
3568                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3569                                                 MEM_TYPE_PAGE_POOL,
3570                                                 rxr->page_pool);
3571                 if (rc) {
3572                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3573                         return rc;
3574                 }
3575
3576                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3577                 if (rc)
3578                         return rc;
3579
3580                 ring->grp_idx = i;
3581                 if (agg_rings) {
3582                         u16 mem_size;
3583
3584                         ring = &rxr->rx_agg_ring_struct;
3585                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3586                         if (rc)
3587                                 return rc;
3588
3589                         ring->grp_idx = i;
3590                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3591                         mem_size = rxr->rx_agg_bmap_size / 8;
3592                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3593                         if (!rxr->rx_agg_bmap)
3594                                 return -ENOMEM;
3595                 }
3596         }
3597         if (bp->flags & BNXT_FLAG_TPA)
3598                 rc = bnxt_alloc_tpa_info(bp);
3599         return rc;
3600 }
3601
3602 static void bnxt_free_tx_rings(struct bnxt *bp)
3603 {
3604         int i;
3605         struct pci_dev *pdev = bp->pdev;
3606
3607         if (!bp->tx_ring)
3608                 return;
3609
3610         for (i = 0; i < bp->tx_nr_rings; i++) {
3611                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3612                 struct bnxt_ring_struct *ring;
3613
3614                 if (txr->tx_push) {
3615                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3616                                           txr->tx_push, txr->tx_push_mapping);
3617                         txr->tx_push = NULL;
3618                 }
3619
3620                 ring = &txr->tx_ring_struct;
3621
3622                 bnxt_free_ring(bp, &ring->ring_mem);
3623         }
3624 }
3625
3626 #define BNXT_TC_TO_RING_BASE(bp, tc)    \
3627         ((tc) * (bp)->tx_nr_rings_per_tc)
3628
3629 #define BNXT_RING_TO_TC_OFF(bp, tx)     \
3630         ((tx) % (bp)->tx_nr_rings_per_tc)
3631
3632 #define BNXT_RING_TO_TC(bp, tx)         \
3633         ((tx) / (bp)->tx_nr_rings_per_tc)
3634
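/* Allocate TX descriptor memory for every ring.  If TX push is enabled
 * and the push buffer fits in 256 bytes, also allocate a small
 * coherent buffer used to back the push operation, and map each ring
 * to its hardware queue via the TC to queue index table.
 */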
3635 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3636 {
3637         int i, j, rc;
3638         struct pci_dev *pdev = bp->pdev;
3639
3640         bp->tx_push_size = 0;
3641         if (bp->tx_push_thresh) {
3642                 int push_size;
3643
3644                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3645                                         bp->tx_push_thresh);
3646
3647                 if (push_size > 256) {
3648                         push_size = 0;
3649                         bp->tx_push_thresh = 0;
3650                 }
3651
3652                 bp->tx_push_size = push_size;
3653         }
3654
3655         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3656                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3657                 struct bnxt_ring_struct *ring;
3658                 u8 qidx;
3659
3660                 ring = &txr->tx_ring_struct;
3661
3662                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3663                 if (rc)
3664                         return rc;
3665
3666                 ring->grp_idx = txr->bnapi->index;
3667                 if (bp->tx_push_size) {
3668                         dma_addr_t mapping;
3669
3670                         /* One pre-allocated DMA buffer to back up
3671                          * the TX push operation
3672                          */
3673                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3674                                                 bp->tx_push_size,
3675                                                 &txr->tx_push_mapping,
3676                                                 GFP_KERNEL);
3677
3678                         if (!txr->tx_push)
3679                                 return -ENOMEM;
3680
3681                         mapping = txr->tx_push_mapping +
3682                                 sizeof(struct tx_push_bd);
3683                         txr->data_mapping = cpu_to_le64(mapping);
3684                 }
3685                 qidx = bp->tc_to_qidx[j];
3686                 ring->queue_id = bp->q_info[qidx].queue_id;
3687                 spin_lock_init(&txr->xdp_tx_lock);
3688                 if (i < bp->tx_nr_rings_xdp)
3689                         continue;
3690                 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3691                         j++;
3692         }
3693         return 0;
3694 }
3695
3696 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3697 {
3698         struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3699
3700         kfree(cpr->cp_desc_ring);
3701         cpr->cp_desc_ring = NULL;
3702         ring->ring_mem.pg_arr = NULL;
3703         kfree(cpr->cp_desc_mapping);
3704         cpr->cp_desc_mapping = NULL;
3705         ring->ring_mem.dma_arr = NULL;
3706 }
3707
3708 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3709 {
3710         cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3711         if (!cpr->cp_desc_ring)
3712                 return -ENOMEM;
3713         cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3714                                        GFP_KERNEL);
3715         if (!cpr->cp_desc_mapping)
3716                 return -ENOMEM;
3717         return 0;
3718 }
3719
3720 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3721 {
3722         int i;
3723
3724         if (!bp->bnapi)
3725                 return;
3726         for (i = 0; i < bp->cp_nr_rings; i++) {
3727                 struct bnxt_napi *bnapi = bp->bnapi[i];
3728
3729                 if (!bnapi)
3730                         continue;
3731                 bnxt_free_cp_arrays(&bnapi->cp_ring);
3732         }
3733 }
3734
3735 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3736 {
3737         int i, n = bp->cp_nr_pages;
3738
3739         for (i = 0; i < bp->cp_nr_rings; i++) {
3740                 struct bnxt_napi *bnapi = bp->bnapi[i];
3741                 int rc;
3742
3743                 if (!bnapi)
3744                         continue;
3745                 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3746                 if (rc)
3747                         return rc;
3748         }
3749         return 0;
3750 }
3751
3752 static void bnxt_free_cp_rings(struct bnxt *bp)
3753 {
3754         int i;
3755
3756         if (!bp->bnapi)
3757                 return;
3758
3759         for (i = 0; i < bp->cp_nr_rings; i++) {
3760                 struct bnxt_napi *bnapi = bp->bnapi[i];
3761                 struct bnxt_cp_ring_info *cpr;
3762                 struct bnxt_ring_struct *ring;
3763                 int j;
3764
3765                 if (!bnapi)
3766                         continue;
3767
3768                 cpr = &bnapi->cp_ring;
3769                 ring = &cpr->cp_ring_struct;
3770
3771                 bnxt_free_ring(bp, &ring->ring_mem);
3772
3773                 if (!cpr->cp_ring_arr)
3774                         continue;
3775
3776                 for (j = 0; j < cpr->cp_ring_count; j++) {
3777                         struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
3778
3779                         ring = &cpr2->cp_ring_struct;
3780                         bnxt_free_ring(bp, &ring->ring_mem);
3781                         bnxt_free_cp_arrays(cpr2);
3782                 }
3783                 kfree(cpr->cp_ring_arr);
3784                 cpr->cp_ring_arr = NULL;
3785                 cpr->cp_ring_count = 0;
3786         }
3787 }
3788
3789 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
3790                                   struct bnxt_cp_ring_info *cpr)
3791 {
3792         struct bnxt_ring_mem_info *rmem;
3793         struct bnxt_ring_struct *ring;
3794         int rc;
3795
3796         rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3797         if (rc) {
3798                 bnxt_free_cp_arrays(cpr);
3799                 return -ENOMEM;
3800         }
3801         ring = &cpr->cp_ring_struct;
3802         rmem = &ring->ring_mem;
3803         rmem->nr_pages = bp->cp_nr_pages;
3804         rmem->page_size = HW_CMPD_RING_SIZE;
3805         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3806         rmem->dma_arr = cpr->cp_desc_mapping;
3807         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3808         rc = bnxt_alloc_ring(bp, rmem);
3809         if (rc) {
3810                 bnxt_free_ring(bp, rmem);
3811                 bnxt_free_cp_arrays(cpr);
3812         }
3813         return rc;
3814 }
3815
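/* Allocate the per-vector completion (on P5+: notification) rings.  On
 * P5+ chips each NQ also gets an array of sub completion rings: one
 * for RX if this index carries an RX ring, and one per TC (or one for
 * XDP) for TX.
 */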
3816 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3817 {
3818         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3819         int i, j, rc, ulp_base_vec, ulp_msix;
3820         int tcs = bp->num_tc;
3821
3822         if (!tcs)
3823                 tcs = 1;
3824         ulp_msix = bnxt_get_ulp_msix_num(bp);
3825         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3826         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
3827                 struct bnxt_napi *bnapi = bp->bnapi[i];
3828                 struct bnxt_cp_ring_info *cpr, *cpr2;
3829                 struct bnxt_ring_struct *ring;
3830                 int cp_count = 0, k;
3831                 int rx = 0, tx = 0;
3832
3833                 if (!bnapi)
3834                         continue;
3835
3836                 cpr = &bnapi->cp_ring;
3837                 cpr->bnapi = bnapi;
3838                 ring = &cpr->cp_ring_struct;
3839
3840                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3841                 if (rc)
3842                         return rc;
3843
3844                 if (ulp_msix && i >= ulp_base_vec)
3845                         ring->map_idx = i + ulp_msix;
3846                 else
3847                         ring->map_idx = i;
3848
3849                 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3850                         continue;
3851
3852                 if (i < bp->rx_nr_rings) {
3853                         cp_count++;
3854                         rx = 1;
3855                 }
3856                 if (i < bp->tx_nr_rings_xdp) {
3857                         cp_count++;
3858                         tx = 1;
3859                 } else if ((sh && i < bp->tx_nr_rings) ||
3860                          (!sh && i >= bp->rx_nr_rings)) {
3861                         cp_count += tcs;
3862                         tx = 1;
3863                 }
3864
3865                 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
3866                                            GFP_KERNEL);
3867                 if (!cpr->cp_ring_arr)
3868                         return -ENOMEM;
3869                 cpr->cp_ring_count = cp_count;
3870
3871                 for (k = 0; k < cp_count; k++) {
3872                         cpr2 = &cpr->cp_ring_arr[k];
3873                         rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3874                         if (rc)
3875                                 return rc;
3876                         cpr2->bnapi = bnapi;
3877                         cpr2->cp_idx = k;
3878                         if (!k && rx) {
3879                                 bp->rx_ring[i].rx_cpr = cpr2;
3880                                 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
3881                         } else {
3882                                 int n, tc = k - rx;
3883
3884                                 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
3885                                 bp->tx_ring[n].tx_cpr = cpr2;
3886                                 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
3887                         }
3888                 }
3889                 if (tx)
3890                         j++;
3891         }
3892         return 0;
3893 }
3894
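/* Point each ring's ring_mem descriptor at the ring's page arrays,
 * DMA maps and software ring sizes so the generic ring alloc/free
 * helpers can operate on CP, RX, agg and TX rings alike.
 */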
3895 static void bnxt_init_ring_struct(struct bnxt *bp)
3896 {
3897         int i, j;
3898
3899         for (i = 0; i < bp->cp_nr_rings; i++) {
3900                 struct bnxt_napi *bnapi = bp->bnapi[i];
3901                 struct bnxt_ring_mem_info *rmem;
3902                 struct bnxt_cp_ring_info *cpr;
3903                 struct bnxt_rx_ring_info *rxr;
3904                 struct bnxt_tx_ring_info *txr;
3905                 struct bnxt_ring_struct *ring;
3906
3907                 if (!bnapi)
3908                         continue;
3909
3910                 cpr = &bnapi->cp_ring;
3911                 ring = &cpr->cp_ring_struct;
3912                 rmem = &ring->ring_mem;
3913                 rmem->nr_pages = bp->cp_nr_pages;
3914                 rmem->page_size = HW_CMPD_RING_SIZE;
3915                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3916                 rmem->dma_arr = cpr->cp_desc_mapping;
3917                 rmem->vmem_size = 0;
3918
3919                 rxr = bnapi->rx_ring;
3920                 if (!rxr)
3921                         goto skip_rx;
3922
3923                 ring = &rxr->rx_ring_struct;
3924                 rmem = &ring->ring_mem;
3925                 rmem->nr_pages = bp->rx_nr_pages;
3926                 rmem->page_size = HW_RXBD_RING_SIZE;
3927                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3928                 rmem->dma_arr = rxr->rx_desc_mapping;
3929                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3930                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3931
3932                 ring = &rxr->rx_agg_ring_struct;
3933                 rmem = &ring->ring_mem;
3934                 rmem->nr_pages = bp->rx_agg_nr_pages;
3935                 rmem->page_size = HW_RXBD_RING_SIZE;
3936                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3937                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3938                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3939                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3940
3941 skip_rx:
3942                 bnxt_for_each_napi_tx(j, bnapi, txr) {
3943                         ring = &txr->tx_ring_struct;
3944                         rmem = &ring->ring_mem;
3945                         rmem->nr_pages = bp->tx_nr_pages;
3946                         rmem->page_size = HW_TXBD_RING_SIZE;
3947                         rmem->pg_arr = (void **)txr->tx_desc_ring;
3948                         rmem->dma_arr = txr->tx_desc_mapping;
3949                         rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3950                         rmem->vmem = (void **)&txr->tx_buf_ring;
3951                 }
3952         }
3953 }
3954
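/* Stamp every RX BD in the ring pages with the given type/flags word
 * and a sequential opaque index used to locate the matching software
 * buffer on completion.
 */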
3955 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3956 {
3957         int i;
3958         u32 prod;
3959         struct rx_bd **rx_buf_ring;
3960
3961         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3962         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3963                 int j;
3964                 struct rx_bd *rxbd;
3965
3966                 rxbd = rx_buf_ring[i];
3967                 if (!rxbd)
3968                         continue;
3969
3970                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3971                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3972                         rxbd->rx_bd_opaque = prod;
3973                 }
3974         }
3975 }
3976
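/* Fill one RX ring with buffers: data buffers up to rx_ring_size,
 * aggregation pages if aggregation rings are enabled, and one buffer
 * per TPA slot.  Partial fills only generate a warning; a TPA buffer
 * allocation failure is fatal.
 */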
3977 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3978 {
3979         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3980         struct net_device *dev = bp->dev;
3981         u32 prod;
3982         int i;
3983
3984         prod = rxr->rx_prod;
3985         for (i = 0; i < bp->rx_ring_size; i++) {
3986                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3987                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3988                                     ring_nr, i, bp->rx_ring_size);
3989                         break;
3990                 }
3991                 prod = NEXT_RX(prod);
3992         }
3993         rxr->rx_prod = prod;
3994
3995         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3996                 return 0;
3997
3998         prod = rxr->rx_agg_prod;
3999         for (i = 0; i < bp->rx_agg_ring_size; i++) {
4000                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4001                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
4002                                     ring_nr, i, bp->rx_agg_ring_size);
4003                         break;
4004                 }
4005                 prod = NEXT_RX_AGG(prod);
4006         }
4007         rxr->rx_agg_prod = prod;
4008
4009         if (rxr->rx_tpa) {
4010                 dma_addr_t mapping;
4011                 u8 *data;
4012
4013                 for (i = 0; i < bp->max_tpa; i++) {
4014                         data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4015                         if (!data)
4016                                 return -ENOMEM;
4017
4018                         rxr->rx_tpa[i].data = data;
4019                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4020                         rxr->rx_tpa[i].mapping = mapping;
4021                 }
4022         }
4023         return 0;
4024 }
4025
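/* Compose the BD "type" word used for every descriptor in the ring: the
 * usable buffer length goes into the RX_BD_LEN field, and the BDs are
 * tagged either as packet BDs (data ring) or aggregation BDs (agg
 * ring).  The SOP flag is additionally set when NET_IP_ALIGN == 2.
 */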
4026 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4027 {
4028         struct bnxt_rx_ring_info *rxr;
4029         struct bnxt_ring_struct *ring;
4030         u32 type;
4031
4032         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4033                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4034
4035         if (NET_IP_ALIGN == 2)
4036                 type |= RX_BD_FLAGS_SOP;
4037
4038         rxr = &bp->rx_ring[ring_nr];
4039         ring = &rxr->rx_ring_struct;
4040         bnxt_init_rxbd_pages(ring, type);
4041
4042         netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4043                              &rxr->bnapi->napi);
4044
4045         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4046                 bpf_prog_add(bp->xdp_prog, 1);
4047                 rxr->xdp_prog = bp->xdp_prog;
4048         }
4049         ring->fw_ring_id = INVALID_HW_RING_ID;
4050
4051         ring = &rxr->rx_agg_ring_struct;
4052         ring->fw_ring_id = INVALID_HW_RING_ID;
4053
4054         if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4055                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4056                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4057
4058                 bnxt_init_rxbd_pages(ring, type);
4059         }
4060
4061         return bnxt_alloc_one_rx_ring(bp, ring_nr);
4062 }
4063
4064 static void bnxt_init_cp_rings(struct bnxt *bp)
4065 {
4066         int i, j;
4067
4068         for (i = 0; i < bp->cp_nr_rings; i++) {
4069                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4070                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4071
4072                 ring->fw_ring_id = INVALID_HW_RING_ID;
4073                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4074                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4075                 if (!cpr->cp_ring_arr)
4076                         continue;
4077                 for (j = 0; j < cpr->cp_ring_count; j++) {
4078                         struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4079
4080                         ring = &cpr2->cp_ring_struct;
4081                         ring->fw_ring_id = INVALID_HW_RING_ID;
4082                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4083                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4084                 }
4085         }
4086 }
4087
4088 static int bnxt_init_rx_rings(struct bnxt *bp)
4089 {
4090         int i, rc = 0;
4091
4092         if (BNXT_RX_PAGE_MODE(bp)) {
4093                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4094                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4095         } else {
4096                 bp->rx_offset = BNXT_RX_OFFSET;
4097                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4098         }
4099
4100         for (i = 0; i < bp->rx_nr_rings; i++) {
4101                 rc = bnxt_init_one_rx_ring(bp, i);
4102                 if (rc)
4103                         break;
4104         }
4105
4106         return rc;
4107 }
4108
4109 static int bnxt_init_tx_rings(struct bnxt *bp)
4110 {
4111         u16 i;
4112
4113         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4114                                    BNXT_MIN_TX_DESC_CNT);
4115
4116         for (i = 0; i < bp->tx_nr_rings; i++) {
4117                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4118                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4119
4120                 ring->fw_ring_id = INVALID_HW_RING_ID;
4121
4122                 if (i >= bp->tx_nr_rings_xdp)
4123                         netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4124                                              NETDEV_QUEUE_TYPE_TX,
4125                                              &txr->bnapi->napi);
4126         }
4127
4128         return 0;
4129 }
4130
4131 static void bnxt_free_ring_grps(struct bnxt *bp)
4132 {
4133         kfree(bp->grp_info);
4134         bp->grp_info = NULL;
4135 }
4136
4137 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4138 {
4139         int i;
4140
4141         if (irq_re_init) {
4142                 bp->grp_info = kcalloc(bp->cp_nr_rings,
4143                                        sizeof(struct bnxt_ring_grp_info),
4144                                        GFP_KERNEL);
4145                 if (!bp->grp_info)
4146                         return -ENOMEM;
4147         }
4148         for (i = 0; i < bp->cp_nr_rings; i++) {
4149                 if (irq_re_init)
4150                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4151                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4152                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4153                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4154                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4155         }
4156         return 0;
4157 }
4158
4159 static void bnxt_free_vnics(struct bnxt *bp)
4160 {
4161         kfree(bp->vnic_info);
4162         bp->vnic_info = NULL;
4163         bp->nr_vnics = 0;
4164 }
4165
4166 static int bnxt_alloc_vnics(struct bnxt *bp)
4167 {
4168         int num_vnics = 1;
4169
4170 #ifdef CONFIG_RFS_ACCEL
4171         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
4172                 num_vnics += bp->rx_nr_rings;
4173 #endif
4174
4175         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4176                 num_vnics++;
4177
4178         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4179                                 GFP_KERNEL);
4180         if (!bp->vnic_info)
4181                 return -ENOMEM;
4182
4183         bp->nr_vnics = num_vnics;
4184         return 0;
4185 }
4186
4187 static void bnxt_init_vnics(struct bnxt *bp)
4188 {
4189         int i;
4190
4191         for (i = 0; i < bp->nr_vnics; i++) {
4192                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4193                 int j;
4194
4195                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4196                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4197                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4198
4199                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4200
4201                 if (bp->vnic_info[i].rss_hash_key) {
4202                         if (!i) {
4203                                 u8 *key = (void *)vnic->rss_hash_key;
4204                                 int k;
4205
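                                /* Cache the first 8 bytes of VNIC 0's RSS key
                                 * as a 64-bit prefix; bnxt_toeplitz() slides
                                 * this window across the key when hashing
                                 * flows.
                                 */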
4206                                 bp->toeplitz_prefix = 0;
4207                                 get_random_bytes(vnic->rss_hash_key,
4208                                               HW_HASH_KEY_SIZE);
4209                                 for (k = 0; k < 8; k++) {
4210                                         bp->toeplitz_prefix <<= 8;
4211                                         bp->toeplitz_prefix |= key[k];
4212                                 }
4213                         } else {
4214                                 memcpy(vnic->rss_hash_key,
4215                                        bp->vnic_info[0].rss_hash_key,
4216                                        HW_HASH_KEY_SIZE);
4217                         }
4218                 }
4219         }
4220 }
4221
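/* Return the number of ring pages needed to hold "ring_size" usable
 * descriptors.  One extra page is added and the result is rounded up to
 * a power of 2 so that nr_pages * desc_per_pg - 1 can serve as the ring
 * index mask in the callers.
 *
 * A worked example (a sketch, assuming 4 KiB pages and 16-byte RX BDs,
 * i.e. 256 descriptors per page): a typical RX ring size of 511 gives
 * pages = 511 / 256 = 1, incremented to 2, which is already a power
 * of 2.
 */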
4222 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4223 {
4224         int pages;
4225
4226         pages = ring_size / desc_per_pg;
4227
4228         if (!pages)
4229                 return 1;
4230
4231         pages++;
4232
4233         while (pages & (pages - 1))
4234                 pages++;
4235
4236         return pages;
4237 }
4238
4239 void bnxt_set_tpa_flags(struct bnxt *bp)
4240 {
4241         bp->flags &= ~BNXT_FLAG_TPA;
4242         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4243                 return;
4244         if (bp->dev->features & NETIF_F_LRO)
4245                 bp->flags |= BNXT_FLAG_LRO;
4246         else if (bp->dev->features & NETIF_F_GRO_HW)
4247                 bp->flags |= BNXT_FLAG_GRO;
4248 }
4249
4250 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4251  * be set on entry.
4252  */
4253 void bnxt_set_ring_params(struct bnxt *bp)
4254 {
4255         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4256         u32 agg_factor = 0, agg_ring_size = 0;
4257
4258         /* 8 for CRC and VLAN */
4259         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4260
4261         rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4262                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4263
4264         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
4265         ring_size = bp->rx_ring_size;
4266         bp->rx_agg_ring_size = 0;
4267         bp->rx_agg_nr_pages = 0;
4268
4269         if (bp->flags & BNXT_FLAG_TPA)
4270                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4271
4272         bp->flags &= ~BNXT_FLAG_JUMBO;
4273         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4274                 u32 jumbo_factor;
4275
4276                 bp->flags |= BNXT_FLAG_JUMBO;
4277                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4278                 if (jumbo_factor > agg_factor)
4279                         agg_factor = jumbo_factor;
4280         }
4281         if (agg_factor) {
4282                 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4283                         ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4284                         netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4285                                     bp->rx_ring_size, ring_size);
4286                         bp->rx_ring_size = ring_size;
4287                 }
4288                 agg_ring_size = ring_size * agg_factor;
4289
4290                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4291                                                         RX_DESC_CNT);
4292                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4293                         u32 tmp = agg_ring_size;
4294
4295                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4296                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4297                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4298                                     tmp, agg_ring_size);
4299                 }
4300                 bp->rx_agg_ring_size = agg_ring_size;
4301                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4302
4303                 if (BNXT_RX_PAGE_MODE(bp)) {
4304                         rx_space = PAGE_SIZE;
4305                         rx_size = PAGE_SIZE -
4306                                   ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4307                                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4308                 } else {
4309                         rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
4310                         rx_space = rx_size + NET_SKB_PAD +
4311                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4312                 }
4313         }
4314
4315         bp->rx_buf_use_size = rx_size;
4316         bp->rx_buf_size = rx_space;
4317
4318         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4319         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4320
4321         ring_size = bp->tx_ring_size;
4322         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4323         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4324
4325         max_rx_cmpl = bp->rx_ring_size;
4326         /* bp->max_tpa needs to be added because TPA_START completions are
4327          * recycled immediately, so in-flight TPA completions are not
4328          * bounded by the RX ring size.
4329          */
4330         if (bp->flags & BNXT_FLAG_TPA)
4331                 max_rx_cmpl += bp->max_tpa;
4332         /* RX and TPA completions are 32-byte, all others are 16-byte */
4333         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4334         bp->cp_ring_size = ring_size;
4335
4336         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4337         if (bp->cp_nr_pages > MAX_CP_PAGES) {
4338                 bp->cp_nr_pages = MAX_CP_PAGES;
4339                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4340                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4341                             ring_size, bp->cp_ring_size);
4342         }
4343         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4344         bp->cp_ring_mask = bp->cp_bit - 1;
4345 }
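
/* A worked example of the completion ring sizing above (illustrative
 * only, assuming rx_ring_size == 511, tx_ring_size == 511, TPA off and
 * no aggregation ring): max_rx_cmpl == 511, so the completion ring must
 * hold 511 * 2 + 0 + 511 = 1533 entries before bnxt_calc_nr_ring_pages()
 * rounds it up to a power-of-2 number of pages.
 */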
4346
4347 /* Changing allocation mode of RX rings.
4348  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4349  */
4350 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4351 {
4352         struct net_device *dev = bp->dev;
4353
4354         if (page_mode) {
4355                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4356                 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4357
4358                 if (bp->xdp_prog->aux->xdp_has_frags)
4359                         dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4360                 else
4361                         dev->max_mtu =
4362                                 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4363                 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4364                         bp->flags |= BNXT_FLAG_JUMBO;
4365                         bp->rx_skb_func = bnxt_rx_multi_page_skb;
4366                 } else {
4367                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4368                         bp->rx_skb_func = bnxt_rx_page_skb;
4369                 }
4370                 bp->rx_dir = DMA_BIDIRECTIONAL;
4371                 /* Disable LRO or GRO_HW */
4372                 netdev_update_features(dev);
4373         } else {
4374                 dev->max_mtu = bp->max_mtu;
4375                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4376                 bp->rx_dir = DMA_FROM_DEVICE;
4377                 bp->rx_skb_func = bnxt_rx_skb;
4378         }
4379         return 0;
4380 }
4381
4382 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4383 {
4384         int i;
4385         struct bnxt_vnic_info *vnic;
4386         struct pci_dev *pdev = bp->pdev;
4387
4388         if (!bp->vnic_info)
4389                 return;
4390
4391         for (i = 0; i < bp->nr_vnics; i++) {
4392                 vnic = &bp->vnic_info[i];
4393
4394                 kfree(vnic->fw_grp_ids);
4395                 vnic->fw_grp_ids = NULL;
4396
4397                 kfree(vnic->uc_list);
4398                 vnic->uc_list = NULL;
4399
4400                 if (vnic->mc_list) {
4401                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4402                                           vnic->mc_list, vnic->mc_list_mapping);
4403                         vnic->mc_list = NULL;
4404                 }
4405
4406                 if (vnic->rss_table) {
4407                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4408                                           vnic->rss_table,
4409                                           vnic->rss_table_dma_addr);
4410                         vnic->rss_table = NULL;
4411                 }
4412
4413                 vnic->rss_hash_key = NULL;
4414                 vnic->flags = 0;
4415         }
4416 }
4417
4418 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4419 {
4420         int i, rc = 0, size;
4421         struct bnxt_vnic_info *vnic;
4422         struct pci_dev *pdev = bp->pdev;
4423         int max_rings;
4424
4425         for (i = 0; i < bp->nr_vnics; i++) {
4426                 vnic = &bp->vnic_info[i];
4427
4428                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4429                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4430
4431                         if (mem_size > 0) {
4432                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4433                                 if (!vnic->uc_list) {
4434                                         rc = -ENOMEM;
4435                                         goto out;
4436                                 }
4437                         }
4438                 }
4439
4440                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4441                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4442                         vnic->mc_list =
4443                                 dma_alloc_coherent(&pdev->dev,
4444                                                    vnic->mc_list_size,
4445                                                    &vnic->mc_list_mapping,
4446                                                    GFP_KERNEL);
4447                         if (!vnic->mc_list) {
4448                                 rc = -ENOMEM;
4449                                 goto out;
4450                         }
4451                 }
4452
4453                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4454                         goto vnic_skip_grps;
4455
4456                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4457                         max_rings = bp->rx_nr_rings;
4458                 else
4459                         max_rings = 1;
4460
4461                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4462                 if (!vnic->fw_grp_ids) {
4463                         rc = -ENOMEM;
4464                         goto out;
4465                 }
4466 vnic_skip_grps:
4467                 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4468                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4469                         continue;
4470
4471                 /* Allocate the RSS table and hash key */
4472                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4473                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4474                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4475
4476                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4477                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4478                                                      vnic->rss_table_size,
4479                                                      &vnic->rss_table_dma_addr,
4480                                                      GFP_KERNEL);
4481                 if (!vnic->rss_table) {
4482                         rc = -ENOMEM;
4483                         goto out;
4484                 }
4485
4486                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4487                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4488         }
4489         return 0;
4490
4491 out:
4492         return rc;
4493 }
4494
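/* Destroy the HWRM command DMA pool and mark every request still on the
 * pending list as BNXT_HWRM_CANCELLED (under RCU) so that its waiter can
 * bail out cleanly instead of waiting for a response that will never
 * arrive.
 */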
4495 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4496 {
4497         struct bnxt_hwrm_wait_token *token;
4498
4499         dma_pool_destroy(bp->hwrm_dma_pool);
4500         bp->hwrm_dma_pool = NULL;
4501
4502         rcu_read_lock();
4503         hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4504                 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4505         rcu_read_unlock();
4506 }
4507
4508 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4509 {
4510         bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4511                                             BNXT_HWRM_DMA_SIZE,
4512                                             BNXT_HWRM_DMA_ALIGN, 0);
4513         if (!bp->hwrm_dma_pool)
4514                 return -ENOMEM;
4515
4516         INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4517
4518         return 0;
4519 }
4520
4521 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4522 {
4523         kfree(stats->hw_masks);
4524         stats->hw_masks = NULL;
4525         kfree(stats->sw_stats);
4526         stats->sw_stats = NULL;
4527         if (stats->hw_stats) {
4528                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4529                                   stats->hw_stats_map);
4530                 stats->hw_stats = NULL;
4531         }
4532 }
4533
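/* Each stats block keeps three parallel views of the same counters:
 * hw_stats is DMA-coherent memory the firmware writes into, sw_stats is
 * the driver's accumulated 64-bit copy, and hw_masks (optional) records
 * the valid width of each hardware counter so wraparound can be
 * compensated for.
 */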
4534 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4535                                 bool alloc_masks)
4536 {
4537         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4538                                              &stats->hw_stats_map, GFP_KERNEL);
4539         if (!stats->hw_stats)
4540                 return -ENOMEM;
4541
4542         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4543         if (!stats->sw_stats)
4544                 goto stats_mem_err;
4545
4546         if (alloc_masks) {
4547                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4548                 if (!stats->hw_masks)
4549                         goto stats_mem_err;
4550         }
4551         return 0;
4552
4553 stats_mem_err:
4554         bnxt_free_stats_mem(bp, stats);
4555         return -ENOMEM;
4556 }
4557
4558 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4559 {
4560         int i;
4561
4562         for (i = 0; i < count; i++)
4563                 mask_arr[i] = mask;
4564 }
4565
4566 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4567 {
4568         int i;
4569
4570         for (i = 0; i < count; i++)
4571                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4572 }
4573
4574 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4575                                     struct bnxt_stats_mem *stats)
4576 {
4577         struct hwrm_func_qstats_ext_output *resp;
4578         struct hwrm_func_qstats_ext_input *req;
4579         __le64 *hw_masks;
4580         int rc;
4581
4582         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4583             !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4584                 return -EOPNOTSUPP;
4585
4586         rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4587         if (rc)
4588                 return rc;
4589
4590         req->fid = cpu_to_le16(0xffff);
4591         req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4592
4593         resp = hwrm_req_hold(bp, req);
4594         rc = hwrm_req_send(bp, req);
4595         if (!rc) {
4596                 hw_masks = &resp->rx_ucast_pkts;
4597                 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4598         }
4599         hwrm_req_drop(bp, req);
4600         return rc;
4601 }
4602
4603 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4604 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4605
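/* Learn the true width of each counter by sending the qstats commands
 * with their *_COUNTER_MASK request flags set.  If the firmware does not
 * support that, fall back to conservative assumptions: 48-bit ring
 * counters on P5+ chips (full 64-bit elsewhere) and 40-bit port
 * counters.
 */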
4606 static void bnxt_init_stats(struct bnxt *bp)
4607 {
4608         struct bnxt_napi *bnapi = bp->bnapi[0];
4609         struct bnxt_cp_ring_info *cpr;
4610         struct bnxt_stats_mem *stats;
4611         __le64 *rx_stats, *tx_stats;
4612         int rc, rx_count, tx_count;
4613         u64 *rx_masks, *tx_masks;
4614         u64 mask;
4615         u8 flags;
4616
4617         cpr = &bnapi->cp_ring;
4618         stats = &cpr->stats;
4619         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4620         if (rc) {
4621                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4622                         mask = (1ULL << 48) - 1;
4623                 else
4624                         mask = -1ULL;
4625                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4626         }
4627         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4628                 stats = &bp->port_stats;
4629                 rx_stats = stats->hw_stats;
4630                 rx_masks = stats->hw_masks;
4631                 rx_count = sizeof(struct rx_port_stats) / 8;
4632                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4633                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4634                 tx_count = sizeof(struct tx_port_stats) / 8;
4635
4636                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4637                 rc = bnxt_hwrm_port_qstats(bp, flags);
4638                 if (rc) {
4639                         mask = (1ULL << 40) - 1;
4640
4641                         bnxt_fill_masks(rx_masks, mask, rx_count);
4642                         bnxt_fill_masks(tx_masks, mask, tx_count);
4643                 } else {
4644                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4645                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4646                         bnxt_hwrm_port_qstats(bp, 0);
4647                 }
4648         }
4649         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4650                 stats = &bp->rx_port_stats_ext;
4651                 rx_stats = stats->hw_stats;
4652                 rx_masks = stats->hw_masks;
4653                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4654                 stats = &bp->tx_port_stats_ext;
4655                 tx_stats = stats->hw_stats;
4656                 tx_masks = stats->hw_masks;
4657                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4658
4659                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4660                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4661                 if (rc) {
4662                         mask = (1ULL << 40) - 1;
4663
4664                         bnxt_fill_masks(rx_masks, mask, rx_count);
4665                         if (tx_stats)
4666                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4667                 } else {
4668                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4669                         if (tx_stats)
4670                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4671                                                    tx_count);
4672                         bnxt_hwrm_port_qstats_ext(bp, 0);
4673                 }
4674         }
4675 }
4676
4677 static void bnxt_free_port_stats(struct bnxt *bp)
4678 {
4679         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4680         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4681
4682         bnxt_free_stats_mem(bp, &bp->port_stats);
4683         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4684         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4685 }
4686
4687 static void bnxt_free_ring_stats(struct bnxt *bp)
4688 {
4689         int i;
4690
4691         if (!bp->bnapi)
4692                 return;
4693
4694         for (i = 0; i < bp->cp_nr_rings; i++) {
4695                 struct bnxt_napi *bnapi = bp->bnapi[i];
4696                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4697
4698                 bnxt_free_stats_mem(bp, &cpr->stats);
4699         }
4700 }
4701
4702 static int bnxt_alloc_stats(struct bnxt *bp)
4703 {
4704         u32 size, i;
4705         int rc;
4706
4707         size = bp->hw_ring_stats_size;
4708
4709         for (i = 0; i < bp->cp_nr_rings; i++) {
4710                 struct bnxt_napi *bnapi = bp->bnapi[i];
4711                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4712
4713                 cpr->stats.len = size;
4714                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4715                 if (rc)
4716                         return rc;
4717
4718                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4719         }
4720
4721         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4722                 return 0;
4723
4724         if (bp->port_stats.hw_stats)
4725                 goto alloc_ext_stats;
4726
4727         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4728         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4729         if (rc)
4730                 return rc;
4731
4732         bp->flags |= BNXT_FLAG_PORT_STATS;
4733
4734 alloc_ext_stats:
4735         /* Display extended statistics only if FW supports them */
4736         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4737                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4738                         return 0;
4739
4740         if (bp->rx_port_stats_ext.hw_stats)
4741                 goto alloc_tx_ext_stats;
4742
4743         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4744         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4745         /* Extended stats are optional */
4746         if (rc)
4747                 return 0;
4748
4749 alloc_tx_ext_stats:
4750         if (bp->tx_port_stats_ext.hw_stats)
4751                 return 0;
4752
4753         if (bp->hwrm_spec_code >= 0x10902 ||
4754             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4755                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4756                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4757                 /* Extended stats are optional */
4758                 if (rc)
4759                         return 0;
4760         }
4761         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4762         return 0;
4763 }
4764
4765 static void bnxt_clear_ring_indices(struct bnxt *bp)
4766 {
4767         int i, j;
4768
4769         if (!bp->bnapi)
4770                 return;
4771
4772         for (i = 0; i < bp->cp_nr_rings; i++) {
4773                 struct bnxt_napi *bnapi = bp->bnapi[i];
4774                 struct bnxt_cp_ring_info *cpr;
4775                 struct bnxt_rx_ring_info *rxr;
4776                 struct bnxt_tx_ring_info *txr;
4777
4778                 if (!bnapi)
4779                         continue;
4780
4781                 cpr = &bnapi->cp_ring;
4782                 cpr->cp_raw_cons = 0;
4783
4784                 bnxt_for_each_napi_tx(j, bnapi, txr) {
4785                         txr->tx_prod = 0;
4786                         txr->tx_cons = 0;
4787                         txr->tx_hw_cons = 0;
4788                 }
4789
4790                 rxr = bnapi->rx_ring;
4791                 if (rxr) {
4792                         rxr->rx_prod = 0;
4793                         rxr->rx_agg_prod = 0;
4794                         rxr->rx_sw_agg_prod = 0;
4795                         rxr->rx_next_cons = 0;
4796                 }
4797                 bnapi->events = 0;
4798         }
4799 }
4800
4801 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
4802 {
4803         int i;
4804
4805         /* We are under rtnl_lock and all our NAPIs have been disabled, so
4806          * it's safe to delete the hash table.
4807          */
4808         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4809                 struct hlist_head *head;
4810                 struct hlist_node *tmp;
4811                 struct bnxt_ntuple_filter *fltr;
4812
4813                 head = &bp->ntp_fltr_hash_tbl[i];
4814                 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
4815                         bnxt_del_l2_filter(bp, fltr->l2_fltr);
4816                         if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
4817                                 continue;
4818                         hlist_del(&fltr->base.hash);
4819                         clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
4820                         bp->ntp_fltr_count--;
4821                         kfree(fltr);
4822                 }
4823         }
4824         if (!all)
4825                 return;
4826
4827         bitmap_free(bp->ntp_fltr_bmap);
4828         bp->ntp_fltr_bmap = NULL;
4829         bp->ntp_fltr_count = 0;
4830 }
4831
4832 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4833 {
4834         int i, rc = 0;
4835
4836         if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
4837                 return 0;
4838
4839         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4840                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4841
4842         bp->ntp_fltr_count = 0;
4843         bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
4844
4845         if (!bp->ntp_fltr_bmap)
4846                 rc = -ENOMEM;
4847
4848         return rc;
4849 }
4850
4851 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
4852 {
4853         int i;
4854
4855         for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
4856                 struct hlist_head *head;
4857                 struct hlist_node *tmp;
4858                 struct bnxt_l2_filter *fltr;
4859
4860                 head = &bp->l2_fltr_hash_tbl[i];
4861                 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
4862                         if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
4863                                 continue;
4864                         hlist_del(&fltr->base.hash);
4865                         if (fltr->base.flags) {
4866                                 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
4867                                 bp->ntp_fltr_count--;
4868                         }
4869                         kfree(fltr);
4870                 }
4871         }
4872 }
4873
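/* The L2 filter table is a fixed-size hash table; a random hash_seed is
 * folded into the jhash2() bucket computation (see
 * bnxt_alloc_l2_filter()) so that bucket placement is not predictable.
 */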
4874 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
4875 {
4876         int i;
4877
4878         for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
4879                 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
4880         get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
4881 }
4882
4883 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4884 {
4885         bnxt_free_vnic_attributes(bp);
4886         bnxt_free_tx_rings(bp);
4887         bnxt_free_rx_rings(bp);
4888         bnxt_free_cp_rings(bp);
4889         bnxt_free_all_cp_arrays(bp);
4890         bnxt_free_ntp_fltrs(bp, false);
4891         bnxt_free_l2_filters(bp, false);
4892         if (irq_re_init) {
4893                 bnxt_free_ring_stats(bp);
4894                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4895                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4896                         bnxt_free_port_stats(bp);
4897                 bnxt_free_ring_grps(bp);
4898                 bnxt_free_vnics(bp);
4899                 kfree(bp->tx_ring_map);
4900                 bp->tx_ring_map = NULL;
4901                 kfree(bp->tx_ring);
4902                 bp->tx_ring = NULL;
4903                 kfree(bp->rx_ring);
4904                 bp->rx_ring = NULL;
4905                 kfree(bp->bnapi);
4906                 bp->bnapi = NULL;
4907         } else {
4908                 bnxt_clear_ring_indices(bp);
4909         }
4910 }
4911
4912 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4913 {
4914         int i, j, rc, size, arr_size;
4915         void *bnapi;
4916
4917         if (irq_re_init) {
4918                 /* Allocate bnapi mem pointer array and mem block for
4919                  * all queues
4920                  */
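                /* Layout of the single allocation (a sketch):
                 * [bnxt_napi *ptrs[cp_nr_rings]][bnxt_napi 0][bnxt_napi 1]...
                 * with both the pointer array and each bnxt_napi block
                 * L1-cache aligned.
                 */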
4921                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4922                                 bp->cp_nr_rings);
4923                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4924                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4925                 if (!bnapi)
4926                         return -ENOMEM;
4927
4928                 bp->bnapi = bnapi;
4929                 bnapi += arr_size;
4930                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4931                         bp->bnapi[i] = bnapi;
4932                         bp->bnapi[i]->index = i;
4933                         bp->bnapi[i]->bp = bp;
4934                         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
4935                                 struct bnxt_cp_ring_info *cpr =
4936                                         &bp->bnapi[i]->cp_ring;
4937
4938                                 cpr->cp_ring_struct.ring_mem.flags =
4939                                         BNXT_RMEM_RING_PTE_FLAG;
4940                         }
4941                 }
4942
4943                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4944                                       sizeof(struct bnxt_rx_ring_info),
4945                                       GFP_KERNEL);
4946                 if (!bp->rx_ring)
4947                         return -ENOMEM;
4948
4949                 for (i = 0; i < bp->rx_nr_rings; i++) {
4950                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4951
4952                         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
4953                                 rxr->rx_ring_struct.ring_mem.flags =
4954                                         BNXT_RMEM_RING_PTE_FLAG;
4955                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4956                                         BNXT_RMEM_RING_PTE_FLAG;
4957                         } else {
4958                                 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
4959                         }
4960                         rxr->bnapi = bp->bnapi[i];
4961                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4962                 }
4963
4964                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4965                                       sizeof(struct bnxt_tx_ring_info),
4966                                       GFP_KERNEL);
4967                 if (!bp->tx_ring)
4968                         return -ENOMEM;
4969
4970                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4971                                           GFP_KERNEL);
4972
4973                 if (!bp->tx_ring_map)
4974                         return -ENOMEM;
4975
4976                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4977                         j = 0;
4978                 else
4979                         j = bp->rx_nr_rings;
4980
4981                 for (i = 0; i < bp->tx_nr_rings; i++) {
4982                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4983                         struct bnxt_napi *bnapi2;
4984
4985                         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4986                                 txr->tx_ring_struct.ring_mem.flags =
4987                                         BNXT_RMEM_RING_PTE_FLAG;
4988                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4989                         if (i >= bp->tx_nr_rings_xdp) {
4990                                 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
4991
4992                                 bnapi2 = bp->bnapi[k];
4993                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4994                                 txr->tx_napi_idx =
4995                                         BNXT_RING_TO_TC(bp, txr->txq_index);
4996                                 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
4997                                 bnapi2->tx_int = bnxt_tx_int;
4998                         } else {
4999                                 bnapi2 = bp->bnapi[j];
5000                                 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5001                                 bnapi2->tx_ring[0] = txr;
5002                                 bnapi2->tx_int = bnxt_tx_int_xdp;
5003                                 j++;
5004                         }
5005                         txr->bnapi = bnapi2;
5006                         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5007                                 txr->tx_cpr = &bnapi2->cp_ring;
5008                 }
5009
5010                 rc = bnxt_alloc_stats(bp);
5011                 if (rc)
5012                         goto alloc_mem_err;
5013                 bnxt_init_stats(bp);
5014
5015                 rc = bnxt_alloc_ntp_fltrs(bp);
5016                 if (rc)
5017                         goto alloc_mem_err;
5018
5019                 rc = bnxt_alloc_vnics(bp);
5020                 if (rc)
5021                         goto alloc_mem_err;
5022         }
5023
5024         rc = bnxt_alloc_all_cp_arrays(bp);
5025         if (rc)
5026                 goto alloc_mem_err;
5027
5028         bnxt_init_ring_struct(bp);
5029
5030         rc = bnxt_alloc_rx_rings(bp);
5031         if (rc)
5032                 goto alloc_mem_err;
5033
5034         rc = bnxt_alloc_tx_rings(bp);
5035         if (rc)
5036                 goto alloc_mem_err;
5037
5038         rc = bnxt_alloc_cp_rings(bp);
5039         if (rc)
5040                 goto alloc_mem_err;
5041
5042         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
5043                                   BNXT_VNIC_UCAST_FLAG;
5044         rc = bnxt_alloc_vnic_attributes(bp);
5045         if (rc)
5046                 goto alloc_mem_err;
5047         return 0;
5048
5049 alloc_mem_err:
5050         bnxt_free_mem(bp, true);
5051         return rc;
5052 }
5053
5054 static void bnxt_disable_int(struct bnxt *bp)
5055 {
5056         int i;
5057
5058         if (!bp->bnapi)
5059                 return;
5060
5061         for (i = 0; i < bp->cp_nr_rings; i++) {
5062                 struct bnxt_napi *bnapi = bp->bnapi[i];
5063                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5064                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5065
5066                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5067                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5068         }
5069 }
5070
5071 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5072 {
5073         struct bnxt_napi *bnapi = bp->bnapi[n];
5074         struct bnxt_cp_ring_info *cpr;
5075
5076         cpr = &bnapi->cp_ring;
5077         return cpr->cp_ring_struct.map_idx;
5078 }
5079
5080 static void bnxt_disable_int_sync(struct bnxt *bp)
5081 {
5082         int i;
5083
5084         if (!bp->irq_tbl)
5085                 return;
5086
5087         atomic_inc(&bp->intr_sem);
5088
5089         bnxt_disable_int(bp);
5090         for (i = 0; i < bp->cp_nr_rings; i++) {
5091                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5092
5093                 synchronize_irq(bp->irq_tbl[map_idx].vector);
5094         }
5095 }
5096
5097 static void bnxt_enable_int(struct bnxt *bp)
5098 {
5099         int i;
5100
5101         atomic_set(&bp->intr_sem, 0);
5102         for (i = 0; i < bp->cp_nr_rings; i++) {
5103                 struct bnxt_napi *bnapi = bp->bnapi[i];
5104                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5105
5106                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5107         }
5108 }
5109
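/* Register the driver with the firmware.  Async event IDs are collected
 * in a 256-bit bitmap which is then forwarded to the firmware as eight
 * little-endian 32-bit words in req->async_event_fwd[].
 */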
5110 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5111                             bool async_only)
5112 {
5113         DECLARE_BITMAP(async_events_bmap, 256);
5114         u32 *events = (u32 *)async_events_bmap;
5115         struct hwrm_func_drv_rgtr_output *resp;
5116         struct hwrm_func_drv_rgtr_input *req;
5117         u32 flags;
5118         int rc, i;
5119
5120         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5121         if (rc)
5122                 return rc;
5123
5124         req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5125                                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
5126                                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5127
5128         req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5129         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5130         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5131                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5132         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5133                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5134                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5135         req->flags = cpu_to_le32(flags);
5136         req->ver_maj_8b = DRV_VER_MAJ;
5137         req->ver_min_8b = DRV_VER_MIN;
5138         req->ver_upd_8b = DRV_VER_UPD;
5139         req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5140         req->ver_min = cpu_to_le16(DRV_VER_MIN);
5141         req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5142
5143         if (BNXT_PF(bp)) {
5144                 u32 data[8];
5145                 int i;
5146
5147                 memset(data, 0, sizeof(data));
5148                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5149                         u16 cmd = bnxt_vf_req_snif[i];
5150                         unsigned int bit, idx;
5151
5152                         idx = cmd / 32;
5153                         bit = cmd % 32;
5154                         data[idx] |= 1 << bit;
5155                 }
5156
5157                 for (i = 0; i < 8; i++)
5158                         req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5159
5160                 req->enables |=
5161                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5162         }
5163
5164         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5165                 req->flags |= cpu_to_le32(
5166                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5167
5168         memset(async_events_bmap, 0, sizeof(async_events_bmap));
5169         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5170                 u16 event_id = bnxt_async_events_arr[i];
5171
5172                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5173                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5174                         continue;
5175                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5176                     !bp->ptp_cfg)
5177                         continue;
5178                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5179         }
5180         if (bmap && bmap_size) {
5181                 for (i = 0; i < bmap_size; i++) {
5182                         if (test_bit(i, bmap))
5183                                 __set_bit(i, async_events_bmap);
5184                 }
5185         }
5186         for (i = 0; i < 8; i++)
5187                 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5188
5189         if (async_only)
5190                 req->enables =
5191                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5192
5193         resp = hwrm_req_hold(bp, req);
5194         rc = hwrm_req_send(bp, req);
5195         if (!rc) {
5196                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5197                 if (resp->flags &
5198                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5199                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5200         }
5201         hwrm_req_drop(bp, req);
5202         return rc;
5203 }
5204
5205 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5206 {
5207         struct hwrm_func_drv_unrgtr_input *req;
5208         int rc;
5209
5210         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5211                 return 0;
5212
5213         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5214         if (rc)
5215                 return rc;
5216         return hwrm_req_send(bp, req);
5217 }
5218
5219 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5220
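/* Free a tunnel destination UDP port previously handed to the firmware.
 * TPA is reconfigured afterwards (here and in the alloc path below),
 * presumably so that the aggregation settings pick up the new tunnel
 * port set.
 */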
5221 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5222 {
5223         struct hwrm_tunnel_dst_port_free_input *req;
5224         int rc;
5225
5226         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5227             bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5228                 return 0;
5229         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5230             bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5231                 return 0;
5232
5233         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5234         if (rc)
5235                 return rc;
5236
5237         req->tunnel_type = tunnel_type;
5238
5239         switch (tunnel_type) {
5240         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5241                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5242                 bp->vxlan_port = 0;
5243                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5244                 break;
5245         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5246                 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5247                 bp->nge_port = 0;
5248                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5249                 break;
5250         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5251                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5252                 bp->vxlan_gpe_port = 0;
5253                 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5254                 break;
5255         default:
5256                 break;
5257         }
5258
5259         rc = hwrm_req_send(bp, req);
5260         if (rc)
5261                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5262                            rc);
5263         if (bp->flags & BNXT_FLAG_TPA)
5264                 bnxt_set_tpa(bp, true);
5265         return rc;
5266 }
5267
5268 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5269                                            u8 tunnel_type)
5270 {
5271         struct hwrm_tunnel_dst_port_alloc_output *resp;
5272         struct hwrm_tunnel_dst_port_alloc_input *req;
5273         int rc;
5274
5275         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5276         if (rc)
5277                 return rc;
5278
5279         req->tunnel_type = tunnel_type;
5280         req->tunnel_dst_port_val = port;
5281
5282         resp = hwrm_req_hold(bp, req);
5283         rc = hwrm_req_send(bp, req);
5284         if (rc) {
5285                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5286                            rc);
5287                 goto err_out;
5288         }
5289
5290         switch (tunnel_type) {
5291         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5292                 bp->vxlan_port = port;
5293                 bp->vxlan_fw_dst_port_id =
5294                         le16_to_cpu(resp->tunnel_dst_port_id);
5295                 break;
5296         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5297                 bp->nge_port = port;
5298                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5299                 break;
5300         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5301                 bp->vxlan_gpe_port = port;
5302                 bp->vxlan_gpe_fw_dst_port_id =
5303                         le16_to_cpu(resp->tunnel_dst_port_id);
5304                 break;
5305         default:
5306                 break;
5307         }
5308         if (bp->flags & BNXT_FLAG_TPA)
5309                 bnxt_set_tpa(bp, true);
5310
5311 err_out:
5312         hwrm_req_drop(bp, req);
5313         return rc;
5314 }
5315
5316 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5317 {
5318         struct hwrm_cfa_l2_set_rx_mask_input *req;
5319         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5320         int rc;
5321
5322         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5323         if (rc)
5324                 return rc;
5325
5326         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5327         if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5328                 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5329                 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5330         }
5331         req->mask = cpu_to_le32(vnic->rx_mask);
5332         return hwrm_req_send_silent(bp, req);
5333 }
5334
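/* Drop a reference to an L2 filter.  The final put unlinks the filter
 * from its hash bucket under ntp_fltr_lock and frees it via kfree_rcu(),
 * which keeps the lockless RCU lookups in __bnxt_lookup_l2_filter()
 * safe.
 */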
5335 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5336 {
5337         if (!atomic_dec_and_test(&fltr->refcnt))
5338                 return;
5339         spin_lock_bh(&bp->ntp_fltr_lock);
5340         if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5341                 spin_unlock_bh(&bp->ntp_fltr_lock);
5342                 return;
5343         }
5344         hlist_del_rcu(&fltr->base.hash);
5345         if (fltr->base.flags) {
5346                 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5347                 bp->ntp_fltr_count--;
5348         }
5349         spin_unlock_bh(&bp->ntp_fltr_lock);
5350         kfree_rcu(fltr, base.rcu);
5351 }
5352
5353 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5354                                                       struct bnxt_l2_key *key,
5355                                                       u32 idx)
5356 {
5357         struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5358         struct bnxt_l2_filter *fltr;
5359
5360         hlist_for_each_entry_rcu(fltr, head, base.hash) {
5361                 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5362
5363                 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5364                     l2_key->vlan == key->vlan)
5365                         return fltr;
5366         }
5367         return NULL;
5368 }
5369
5370 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5371                                                     struct bnxt_l2_key *key,
5372                                                     u32 idx)
5373 {
5374         struct bnxt_l2_filter *fltr = NULL;
5375
5376         rcu_read_lock();
5377         fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5378         if (fltr)
5379                 atomic_inc(&fltr->refcnt);
5380         rcu_read_unlock();
5381         return fltr;
5382 }
5383
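/* True when the RSS configuration hashes this flow on the full 4-tuple
 * (addresses + ports) rather than on addresses only; used below to
 * decide how many input bytes feed the Toeplitz hash.
 */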
5384 #define BNXT_IPV4_4TUPLE(bp, fkeys)                                     \
5385         (((fkeys)->basic.ip_proto == IPPROTO_TCP &&                     \
5386           (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||  \
5387          ((fkeys)->basic.ip_proto == IPPROTO_UDP &&                     \
5388           (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5389
5390 #define BNXT_IPV6_4TUPLE(bp, fkeys)                                     \
5391         (((fkeys)->basic.ip_proto == IPPROTO_TCP &&                     \
5392           (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||  \
5393          ((fkeys)->basic.ip_proto == IPPROTO_UDP &&                     \
5394           (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5395
5396 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5397 {
5398         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5399                 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5400                         return sizeof(fkeys->addrs.v4addrs) +
5401                                sizeof(fkeys->ports);
5402
5403                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5404                         return sizeof(fkeys->addrs.v4addrs);
5405         }
5406
5407         if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5408                 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5409                         return sizeof(fkeys->addrs.v6addrs) +
5410                                sizeof(fkeys->ports);
5411
5412                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5413                         return sizeof(fkeys->addrs.v6addrs);
5414         }
5415
5416         return 0;
5417 }
5418
5419 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5420                          const unsigned char *key)
5421 {
5422         u64 prefix = bp->toeplitz_prefix, hash = 0;
5423         struct bnxt_ipv4_tuple tuple4;
5424         struct bnxt_ipv6_tuple tuple6;
5425         int i, j, len = 0;
5426         u8 *four_tuple;
5427
5428         len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5429         if (!len)
5430                 return 0;
5431
5432         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5433                 tuple4.v4addrs = fkeys->addrs.v4addrs;
5434                 tuple4.ports = fkeys->ports;
5435                 four_tuple = (unsigned char *)&tuple4;
5436         } else {
5437                 tuple6.v6addrs = fkeys->addrs.v6addrs;
5438                 tuple6.ports = fkeys->ports;
5439                 four_tuple = (unsigned char *)&tuple6;
5440         }
5441
5442         for (i = 0, j = 8; i < len; i++, j++) {
5443                 u8 byte = four_tuple[i];
5444                 int bit;
5445
5446                 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5447                         if (byte & 0x80)
5448                                 hash ^= prefix;
5449                 }
5450                 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5451         }
5452
5453         /* The valid part of the hash is in the upper 32 bits. */
5454         return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5455 }
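
/* bnxt_toeplitz() above is the standard Toeplitz hash specialized to the
 * 4-tuple layouts this driver feeds it: a 64-bit window of key material,
 * seeded from bp->toeplitz_prefix, slides one bit per input bit, and the
 * windows aligned with set input bits are XORed together.  For reference,
 * a minimal 32-bit-window form of the same algorithm (illustrative sketch
 * only, not driver code; it assumes key holds at least len + 4 bytes,
 * which the 40-byte RSS key does for any 4-tuple):
 *
 *	static u32 toeplitz_ref(const u8 *key, const u8 *data, int len)
 *	{
 *		u32 window = (u32)key[0] << 24 | key[1] << 16 |
 *			     key[2] << 8 | key[3];
 *		u32 hash = 0;
 *		int i, bit;
 *
 *		for (i = 0; i < len; i++) {
 *			for (bit = 7; bit >= 0; bit--) {
 *				if (data[i] & (1 << bit))
 *					hash ^= window;
 *				window <<= 1;
 *				if (key[i + 4] & (1 << bit))
 *					window |= 1;
 *			}
 *		}
 *		return hash;
 *	}
 */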
5456
5457 #ifdef CONFIG_RFS_ACCEL
5458 static struct bnxt_l2_filter *
5459 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5460 {
5461         struct bnxt_l2_filter *fltr;
5462         u32 idx;
5463
5464         idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5465               BNXT_L2_FLTR_HASH_MASK;
5466         fltr = bnxt_lookup_l2_filter(bp, key, idx);
5467         return fltr;
5468 }
5469 #endif
5470
5471 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5472                                struct bnxt_l2_key *key, u32 idx)
5473 {
5474         struct hlist_head *head;
5475
5476         ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5477         fltr->l2_key.vlan = key->vlan;
5478         fltr->base.type = BNXT_FLTR_TYPE_L2;
5479         if (fltr->base.flags) {
5480                 int bit_id;
5481
5482                 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5483                                                  BNXT_MAX_FLTR, 0);
5484                 if (bit_id < 0)
5485                         return -ENOMEM;
5486                 fltr->base.sw_id = (u16)bit_id;
5487         }
5488         head = &bp->l2_fltr_hash_tbl[idx];
5489         hlist_add_head_rcu(&fltr->base.hash, head);
5490         set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5491         atomic_set(&fltr->refcnt, 1);
5492         return 0;
5493 }
5494
5495 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5496                                                    struct bnxt_l2_key *key,
5497                                                    gfp_t gfp)
5498 {
5499         struct bnxt_l2_filter *fltr;
5500         u32 idx;
5501         int rc;
5502
5503         idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5504               BNXT_L2_FLTR_HASH_MASK;
5505         fltr = bnxt_lookup_l2_filter(bp, key, idx);
5506         if (fltr)
5507                 return fltr;
5508
5509         fltr = kzalloc(sizeof(*fltr), gfp);
5510         if (!fltr)
5511                 return ERR_PTR(-ENOMEM);
5512         spin_lock_bh(&bp->ntp_fltr_lock);
5513         rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5514         spin_unlock_bh(&bp->ntp_fltr_lock);
5515         if (rc) {
5516                 bnxt_del_l2_filter(bp, fltr);
5517                 fltr = ERR_PTR(rc);
5518         }
5519         return fltr;
5520 }
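
/* bnxt_alloc_l2_filter() returns either a live (possibly pre-existing)
 * filter or an ERR_PTR(), never NULL, so callers follow the usual kernel
 * idiom (hypothetical caller shown):
 *
 *	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
 *	if (IS_ERR(fltr))
 *		return PTR_ERR(fltr);
 */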
5521
5522 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
5523 {
5524 #ifdef CONFIG_BNXT_SRIOV
5525         struct bnxt_vf_info *vf = &pf->vf[vf_idx];
5526
5527         return vf->fw_fid;
5528 #else
5529         return INVALID_HW_RING_ID;
5530 #endif
5531 }
5532
5533 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5534 {
5535         struct hwrm_cfa_l2_filter_free_input *req;
5536         u16 target_id = 0xffff;
5537         int rc;
5538
5539         if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5540                 struct bnxt_pf_info *pf = &bp->pf;
5541
5542                 if (fltr->base.vf_idx >= pf->active_vfs)
5543                         return -EINVAL;
5544
5545                 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5546                 if (target_id == INVALID_HW_RING_ID)
5547                         return -EINVAL;
5548         }
5549
5550         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5551         if (rc)
5552                 return rc;
5553
5554         req->target_id = cpu_to_le16(target_id);
5555         req->l2_filter_id = fltr->base.filter_id;
5556         return hwrm_req_send(bp, req);
5557 }
5558
5559 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5560 {
5561         struct hwrm_cfa_l2_filter_alloc_output *resp;
5562         struct hwrm_cfa_l2_filter_alloc_input *req;
5563         u16 target_id = 0xffff;
5564         int rc;
5565
5566         if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5567                 struct bnxt_pf_info *pf = &bp->pf;
5568
5569                 if (fltr->base.vf_idx >= pf->active_vfs)
5570                         return -EINVAL;
5571
5572                 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5573         }
5574         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5575         if (rc)
5576                 return rc;
5577
5578         req->target_id = cpu_to_le16(target_id);
5579         req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5580
5581         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5582                 req->flags |=
5583                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5584         req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
5585         req->enables =
5586                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5587                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5588                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5589         ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
5590         eth_broadcast_addr(req->l2_addr_mask);
5591
5592         if (fltr->l2_key.vlan) {
5593                 req->enables |=
5594                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
5595                                 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
5596                                 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
5597                 req->num_vlans = 1;
5598                 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
5599                 req->l2_ivlan_mask = cpu_to_le16(0xfff);
5600         }
5601
5602         resp = hwrm_req_hold(bp, req);
5603         rc = hwrm_req_send(bp, req);
5604         if (!rc) {
5605                 fltr->base.filter_id = resp->l2_filter_id;
5606                 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
5607         }
5608         hwrm_req_drop(bp, req);
5609         return rc;
5610 }
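
/* The tail of the function above shows the HWRM response-access pattern
 * used throughout this file: hwrm_req_hold() keeps the DMA response
 * buffer valid past hwrm_req_send(), and hwrm_req_drop() releases the
 * request and response together.  The shape, in outline:
 *
 *	resp = hwrm_req_hold(bp, req);
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		...read resp fields...
 *	hwrm_req_drop(bp, req);
 */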
5611
5612 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5613                                      struct bnxt_ntuple_filter *fltr)
5614 {
5615         struct hwrm_cfa_ntuple_filter_free_input *req;
5616         int rc;
5617
5618         set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
5619         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5620         if (rc)
5621                 return rc;
5622
5623         req->ntuple_filter_id = fltr->base.filter_id;
5624         return hwrm_req_send(bp, req);
5625 }
5626
5627 #define BNXT_NTP_FLTR_FLAGS                                     \
5628         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
5629          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
5630          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
5631          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
5632          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
5633          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
5634          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
5635          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
5636          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
5637          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
5638          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
5639          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
5640          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
5641
5642 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
5643                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
5644
5645 void bnxt_fill_ipv6_mask(__be32 mask[4])
5646 {
5647         int i;
5648
5649         for (i = 0; i < 4; i++)
5650                 mask[i] = cpu_to_be32(~0);
5651 }
5652
5653 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5654                                       struct bnxt_ntuple_filter *fltr)
5655 {
5656         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
5657         struct hwrm_cfa_ntuple_filter_alloc_input *req;
5658         struct flow_keys *keys = &fltr->fkeys;
5659         struct bnxt_l2_filter *l2_fltr;
5660         struct bnxt_vnic_info *vnic;
5661         u32 flags = 0;
5662         int rc;
5663
5664         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5665         if (rc)
5666                 return rc;
5667
5668         l2_fltr = fltr->l2_fltr;
5669         req->l2_filter_id = l2_fltr->base.filter_id;
5670
5672         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
5673                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
5674                 req->dst_id = cpu_to_le16(fltr->base.rxq);
5675         } else {
5676                 vnic = &bp->vnic_info[fltr->base.rxq + 1];
5677                 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5678         }
5679         req->flags = cpu_to_le32(flags);
5680         req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
5681
5682         req->ethertype = htons(ETH_P_IP);
5683         req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
5684         req->ip_protocol = keys->basic.ip_proto;
5685
5686         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
5687                 req->ethertype = htons(ETH_P_IPV6);
5688                 req->ip_addr_type =
5689                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5690                 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
5691                         *(struct in6_addr *)&req->src_ipaddr[0] =
5692                                 keys->addrs.v6addrs.src;
5693                         bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
5694                 }
5695                 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
5696                         *(struct in6_addr *)&req->dst_ipaddr[0] =
5697                                 keys->addrs.v6addrs.dst;
5698                         bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
5699                 }
5700         } else {
5701                 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
5702                         req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5703                         req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5704                 }
5705                 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
5706                         req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5707                         req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5708                 }
5709         }
5710         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5711                 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5712                 req->tunnel_type =
5713                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5714         }
5715
5716         if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
5717                 req->src_port = keys->ports.src;
5718                 req->src_port_mask = cpu_to_be16(0xffff);
5719         }
5720         if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
5721                 req->dst_port = keys->ports.dst;
5722                 req->dst_port_mask = cpu_to_be16(0xffff);
5723         }
5724
5725         resp = hwrm_req_hold(bp, req);
5726         rc = hwrm_req_send(bp, req);
5727         if (!rc)
5728                 fltr->base.filter_id = resp->ntuple_filter_id;
5729         hwrm_req_drop(bp, req);
5730         return rc;
5731 }
5732
5733 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5734                                      const u8 *mac_addr)
5735 {
5736         struct bnxt_l2_filter *fltr;
5737         struct bnxt_l2_key key;
5738         int rc;
5739
5740         ether_addr_copy(key.dst_mac_addr, mac_addr);
5741         key.vlan = 0;
5742         fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
5743         if (IS_ERR(fltr))
5744                 return PTR_ERR(fltr);
5745
5746         fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
5747         rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
5748         if (rc)
5749                 bnxt_del_l2_filter(bp, fltr);
5750         else
5751                 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
5752         return rc;
5753 }
5754
5755 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5756 {
5757         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5758
5759         /* Any associated ntuple filters will also be cleared by firmware. */
5760         for (i = 0; i < num_of_vnics; i++) {
5761                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5762
5763                 for (j = 0; j < vnic->uc_filter_count; j++) {
5764                         struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
5765
5766                         bnxt_hwrm_l2_filter_free(bp, fltr);
5767                         bnxt_del_l2_filter(bp, fltr);
5768                 }
5769                 vnic->uc_filter_count = 0;
5770         }
5771 }
5772
5773 #define BNXT_DFLT_TUNL_TPA_BMAP                         \
5774         (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |       \
5775          VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |      \
5776          VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
5777
5778 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
5779                                            struct hwrm_vnic_tpa_cfg_input *req)
5780 {
5781         u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
5782
5783         if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
5784                 return;
5785
5786         if (bp->vxlan_port)
5787                 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
5788         if (bp->vxlan_gpe_port)
5789                 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
5790         if (bp->nge_port)
5791                 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
5792
5793         req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
5794         req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
5795 }
5796
5797 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5798 {
5799         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5800         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5801         struct hwrm_vnic_tpa_cfg_input *req;
5802         int rc;
5803
5804         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5805                 return 0;
5806
5807         rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5808         if (rc)
5809                 return rc;
5810
5811         if (tpa_flags) {
5812                 u16 mss = bp->dev->mtu - 40;
5813                 u32 nsegs, n, segs = 0, flags;
5814
5815                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5816                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5817                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5818                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5819                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5820                 if (tpa_flags & BNXT_FLAG_GRO)
5821                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5822
5823                 req->flags = cpu_to_le32(flags);
5824
5825                 req->enables =
5826                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5827                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5828                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5829
5830                 /* The number of segs is in log2 units, and the first
5831                  * packet is not counted as part of these units.
5832                  */
5833                 if (mss <= BNXT_RX_PAGE_SIZE) {
5834                         n = BNXT_RX_PAGE_SIZE / mss;
5835                         nsegs = (MAX_SKB_FRAGS - 1) * n;
5836                 } else {
5837                         n = mss / BNXT_RX_PAGE_SIZE;
5838                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
5839                                 n++;
5840                         nsegs = (MAX_SKB_FRAGS - n) / n;
5841                 }
5842
5843                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5844                         segs = MAX_TPA_SEGS_P5;
5845                         max_aggs = bp->max_tpa;
5846                 } else {
5847                         segs = ilog2(nsegs);
5848                 }
5849                 req->max_agg_segs = cpu_to_le16(segs);
5850                 req->max_aggs = cpu_to_le16(max_aggs);
5851
5852                 req->min_agg_len = cpu_to_le32(512);
5853                 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
5854         }
5855         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5856
5857         return hwrm_req_send(bp, req);
5858 }
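
/* Worked example of the TPA segment math above, assuming a 1500-byte MTU,
 * 4K rx pages and MAX_SKB_FRAGS == 17: mss = 1500 - 40 = 1460 fits in one
 * page, so n = 4096 / 1460 = 2 buffers per MSS and
 * nsegs = (17 - 1) * 2 = 32; on pre-P5 chips max_agg_segs is then sent as
 * ilog2(32) = 5, since the firmware field is in log2 units.
 */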
5859
5860 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5861 {
5862         struct bnxt_ring_grp_info *grp_info;
5863
5864         grp_info = &bp->grp_info[ring->grp_idx];
5865         return grp_info->cp_fw_ring_id;
5866 }
5867
5868 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5869 {
5870         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5871                 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
5872         else
5873                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5874 }
5875
5876 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5877 {
5878         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5879                 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
5880         else
5881                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5882 }
5883
5884 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5885 {
5886         int entries;
5887
5888         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5889                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5890         else
5891                 entries = HW_HASH_INDEX_SIZE;
5892
5893         bp->rss_indir_tbl_entries = entries;
5894         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5895                                           GFP_KERNEL);
5896         if (!bp->rss_indir_tbl)
5897                 return -ENOMEM;
5898         return 0;
5899 }
5900
5901 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5902 {
5903         u16 max_rings, max_entries, pad, i;
5904
5905         if (!bp->rx_nr_rings)
5906                 return;
5907
5908         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5909                 max_rings = bp->rx_nr_rings - 1;
5910         else
5911                 max_rings = bp->rx_nr_rings;
5912
5913         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5914
5915         for (i = 0; i < max_entries; i++)
5916                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5917
5918         pad = bp->rss_indir_tbl_entries - max_entries;
5919         if (pad)
5920                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5921 }
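
/* ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g. 4 rx
 * rings the default indirection table above becomes 0 1 2 3 0 1 2 3 ...,
 * spreading RSS flows round-robin across the rings; any unused tail of
 * the table is zeroed.
 */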
5922
5923 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5924 {
5925         u16 i, tbl_size, max_ring = 0;
5926
5927         if (!bp->rss_indir_tbl)
5928                 return 0;
5929
5930         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5931         for (i = 0; i < tbl_size; i++)
5932                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5933         return max_ring;
5934 }
5935
5936 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5937 {
5938         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5939                 if (!rx_rings)
5940                         return 0;
5941                 return bnxt_calc_nr_ring_pages(rx_rings - 1,
5942                                                BNXT_RSS_TABLE_ENTRIES_P5);
5943         }
5944         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5945                 return 2;
5946         return 1;
5947 }
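
/* Example: with BNXT_RSS_TABLE_ENTRIES_P5 at its usual value of 64, a
 * P5+ chip needs one RSS context for up to 64 rx rings and two for
 * 65-128, since bnxt_calc_nr_ring_pages(rx_rings - 1, 64) rounds up;
 * older chips always use one context (two on Nitro A0).
 */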
5948
5949 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5950 {
5951         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5952         u16 i, j;
5953
5954         /* Fill the RSS indirection table with ring group ids */
5955         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5956                 if (!no_rss)
5957                         j = bp->rss_indir_tbl[i];
5958                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5959         }
5960 }
5961
5962 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5963                                     struct bnxt_vnic_info *vnic)
5964 {
5965         __le16 *ring_tbl = vnic->rss_table;
5966         struct bnxt_rx_ring_info *rxr;
5967         u16 tbl_size, i;
5968
5969         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5970
5971         for (i = 0; i < tbl_size; i++) {
5972                 u16 ring_id, j;
5973
5974                 j = bp->rss_indir_tbl[i];
5975                 rxr = &bp->rx_ring[j];
5976
5977                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5978                 *ring_tbl++ = cpu_to_le16(ring_id);
5979                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5980                 *ring_tbl++ = cpu_to_le16(ring_id);
5981         }
5982 }
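
/* On P5+ chips each indirection slot expands to a (rx ring id, companion
 * completion ring id) pair, which is why the loop above writes two __le16
 * entries per table index instead of the single ring-group id written by
 * bnxt_fill_hw_rss_tbl() on older chips.
 */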
5983
5984 static void
5985 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
5986                          struct bnxt_vnic_info *vnic)
5987 {
5988         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5989                 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5990         else
5991                 bnxt_fill_hw_rss_tbl(bp, vnic);
5992
5993         if (bp->rss_hash_delta) {
5994                 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
5995                 if (bp->rss_hash_cfg & bp->rss_hash_delta)
5996                         req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
5997                 else
5998                         req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
5999         } else {
6000                 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6001         }
6002         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6003         req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6004         req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6005 }
6006
6007 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
6008 {
6009         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6010         struct hwrm_vnic_rss_cfg_input *req;
6011         int rc;
6012
6013         if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6014             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6015                 return 0;
6016
6017         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6018         if (rc)
6019                 return rc;
6020
6021         if (set_rss)
6022                 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6023         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6024         return hwrm_req_send(bp, req);
6025 }
6026
6027 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
6028 {
6029         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6030         struct hwrm_vnic_rss_cfg_input *req;
6031         dma_addr_t ring_tbl_map;
6032         u32 i, nr_ctxs;
6033         int rc;
6034
6035         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6036         if (rc)
6037                 return rc;
6038
6039         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6040         if (!set_rss)
6041                 return hwrm_req_send(bp, req);
6042
6043         __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6044         ring_tbl_map = vnic->rss_table_dma_addr;
6045         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6046
6047         hwrm_req_hold(bp, req);
6048         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6049                 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6050                 req->ring_table_pair_index = i;
6051                 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6052                 rc = hwrm_req_send(bp, req);
6053                 if (rc)
6054                         goto exit;
6055         }
6056
6057 exit:
6058         hwrm_req_drop(bp, req);
6059         return rc;
6060 }
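
/* The loop above re-sends one held request once per RSS context, only
 * rewriting the fields that differ between contexts.  hwrm_req_hold() is
 * what makes the re-send safe; the shape in outline:
 *
 *	hwrm_req_hold(bp, req);
 *	for (i = 0; i < nr_ctxs; i++) {
 *		...update per-context fields...
 *		rc = hwrm_req_send(bp, req);	(req remains valid)
 *		if (rc)
 *			break;
 *	}
 *	hwrm_req_drop(bp, req);
 */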
6061
6062 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6063 {
6064         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6065         struct hwrm_vnic_rss_qcfg_output *resp;
6066         struct hwrm_vnic_rss_qcfg_input *req;
6067
6068         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6069                 return;
6070
6071         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6072         /* All contexts are configured to the same hash_type; context zero always exists. */
6073         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6074         resp = hwrm_req_hold(bp, req);
6075         if (!hwrm_req_send(bp, req)) {
6076                 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6077                 bp->rss_hash_delta = 0;
6078         }
6079         hwrm_req_drop(bp, req);
6080 }
6081
6082 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
6083 {
6084         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6085         struct hwrm_vnic_plcmodes_cfg_input *req;
6086         int rc;
6087
6088         rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6089         if (rc)
6090                 return rc;
6091
6092         req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6093         req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6094
6095         if (BNXT_RX_PAGE_MODE(bp)) {
6096                 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6097         } else {
6098                 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6099                                           VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6100                 req->enables |=
6101                         cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6102                 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
6103                 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
6104         }
6105         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6106         return hwrm_req_send(bp, req);
6107 }
6108
6109 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
6110                                         u16 ctx_idx)
6111 {
6112         struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6113
6114         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6115                 return;
6116
6117         req->rss_cos_lb_ctx_id =
6118                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
6119
6120         hwrm_req_send(bp, req);
6121         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6122 }
6123
6124 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6125 {
6126         int i, j;
6127
6128         for (i = 0; i < bp->nr_vnics; i++) {
6129                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6130
6131                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6132                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6133                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
6134                 }
6135         }
6136         bp->rsscos_nr_ctxs = 0;
6137 }
6138
6139 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
6140 {
6141         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6142         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6143         int rc;
6144
6145         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6146         if (rc)
6147                 return rc;
6148
6149         resp = hwrm_req_hold(bp, req);
6150         rc = hwrm_req_send(bp, req);
6151         if (!rc)
6152                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
6153                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
6154         hwrm_req_drop(bp, req);
6155
6156         return rc;
6157 }
6158
6159 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6160 {
6161         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6162                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6163         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6164 }
6165
6166 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
6167 {
6168         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6169         struct hwrm_vnic_cfg_input *req;
6170         unsigned int ring = 0, grp_idx;
6171         u16 def_vlan = 0;
6172         int rc;
6173
6174         rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6175         if (rc)
6176                 return rc;
6177
6178         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6179                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6180
6181                 req->default_rx_ring_id =
6182                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6183                 req->default_cmpl_ring_id =
6184                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6185                 req->enables =
6186                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6187                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6188                 goto vnic_mru;
6189         }
6190         req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6191         /* Only RSS is supported for now; TBD: COS & LB */
6192         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6193                 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6194                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6195                                            VNIC_CFG_REQ_ENABLES_MRU);
6196         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6197                 req->rss_rule =
6198                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
6199                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6200                                            VNIC_CFG_REQ_ENABLES_MRU);
6201                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6202         } else {
6203                 req->rss_rule = cpu_to_le16(0xffff);
6204         }
6205
6206         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6207             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6208                 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6209                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6210         } else {
6211                 req->cos_rule = cpu_to_le16(0xffff);
6212         }
6213
6214         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6215                 ring = 0;
6216         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6217                 ring = vnic_id - 1;
6218         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6219                 ring = bp->rx_nr_rings - 1;
6220
6221         grp_idx = bp->rx_ring[ring].bnapi->index;
6222         req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6223         req->lb_rule = cpu_to_le16(0xffff);
6224 vnic_mru:
6225         req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
6226
6227         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6228 #ifdef CONFIG_BNXT_SRIOV
6229         if (BNXT_VF(bp))
6230                 def_vlan = bp->vf.vlan;
6231 #endif
6232         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6233                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6234         if (!vnic_id && bnxt_ulp_registered(bp->edev))
6235                 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6236
6237         return hwrm_req_send(bp, req);
6238 }
6239
6240 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
6241 {
6242         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
6243                 struct hwrm_vnic_free_input *req;
6244
6245                 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6246                         return;
6247
6248                 req->vnic_id =
6249                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
6250
6251                 hwrm_req_send(bp, req);
6252                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
6253         }
6254 }
6255
6256 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6257 {
6258         u16 i;
6259
6260         for (i = 0; i < bp->nr_vnics; i++)
6261                 bnxt_hwrm_vnic_free_one(bp, i);
6262 }
6263
6264 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
6265                                 unsigned int start_rx_ring_idx,
6266                                 unsigned int nr_rings)
6267 {
6268         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6269         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6270         struct hwrm_vnic_alloc_output *resp;
6271         struct hwrm_vnic_alloc_input *req;
6272         int rc;
6273
6274         rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6275         if (rc)
6276                 return rc;
6277
6278         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6279                 goto vnic_no_ring_grps;
6280
6281         /* map ring groups to this vnic */
6282         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6283                 grp_idx = bp->rx_ring[i].bnapi->index;
6284                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6285                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6286                                    j, nr_rings);
6287                         break;
6288                 }
6289                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6290         }
6291
6292 vnic_no_ring_grps:
6293         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6294                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6295         if (vnic_id == 0)
6296                 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6297
6298         resp = hwrm_req_hold(bp, req);
6299         rc = hwrm_req_send(bp, req);
6300         if (!rc)
6301                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6302         hwrm_req_drop(bp, req);
6303         return rc;
6304 }
6305
6306 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6307 {
6308         struct hwrm_vnic_qcaps_output *resp;
6309         struct hwrm_vnic_qcaps_input *req;
6310         int rc;
6311
6312         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6313         bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6314         bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6315         if (bp->hwrm_spec_code < 0x10600)
6316                 return 0;
6317
6318         rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6319         if (rc)
6320                 return rc;
6321
6322         resp = hwrm_req_hold(bp, req);
6323         rc = hwrm_req_send(bp, req);
6324         if (!rc) {
6325                 u32 flags = le32_to_cpu(resp->flags);
6326
6327                 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6328                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6329                         bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6330                 if (flags &
6331                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6332                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6333
6334                 /* Older P5 fw before EXT_HW_STATS support did not set
6335                  * VLAN_STRIP_CAP properly.
6336                  */
6337                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6338                     (BNXT_CHIP_P5(bp) &&
6339                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6340                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6341                 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6342                         bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6343                 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6344                         bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6345                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6346                 if (bp->max_tpa_v2) {
6347                         if (BNXT_CHIP_P5(bp))
6348                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6349                         else
6350                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6351                 }
6352                 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6353                         bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6354         }
6355         hwrm_req_drop(bp, req);
6356         return rc;
6357 }
6358
6359 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6360 {
6361         struct hwrm_ring_grp_alloc_output *resp;
6362         struct hwrm_ring_grp_alloc_input *req;
6363         int rc;
6364         u16 i;
6365
6366         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6367                 return 0;
6368
6369         rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6370         if (rc)
6371                 return rc;
6372
6373         resp = hwrm_req_hold(bp, req);
6374         for (i = 0; i < bp->rx_nr_rings; i++) {
6375                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6376
6377                 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6378                 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6379                 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6380                 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6381
6382                 rc = hwrm_req_send(bp, req);
6383
6384                 if (rc)
6385                         break;
6386
6387                 bp->grp_info[grp_idx].fw_grp_id =
6388                         le32_to_cpu(resp->ring_group_id);
6389         }
6390         hwrm_req_drop(bp, req);
6391         return rc;
6392 }
6393
6394 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6395 {
6396         struct hwrm_ring_grp_free_input *req;
6397         u16 i;
6398
6399         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6400                 return;
6401
6402         if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6403                 return;
6404
6405         hwrm_req_hold(bp, req);
6406         for (i = 0; i < bp->cp_nr_rings; i++) {
6407                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6408                         continue;
6409                 req->ring_group_id =
6410                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
6411
6412                 hwrm_req_send(bp, req);
6413                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6414         }
6415         hwrm_req_drop(bp, req);
6416 }
6417
6418 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6419                                     struct bnxt_ring_struct *ring,
6420                                     u32 ring_type, u32 map_index)
6421 {
6422         struct hwrm_ring_alloc_output *resp;
6423         struct hwrm_ring_alloc_input *req;
6424         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6425         struct bnxt_ring_grp_info *grp_info;
6426         int rc, err = 0;
6427         u16 ring_id;
6428
6429         rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6430         if (rc)
6431                 goto exit;
6432
6433         req->enables = 0;
6434         if (rmem->nr_pages > 1) {
6435                 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6436                 /* Page size is in log2 units */
6437                 req->page_size = BNXT_PAGE_SHIFT;
6438                 req->page_tbl_depth = 1;
6439         } else {
6440                 req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
6441         }
6442         req->fbo = 0;
6443         /* Association of ring index with doorbell index and MSIX number */
6444         req->logical_id = cpu_to_le16(map_index);
6445
6446         switch (ring_type) {
6447         case HWRM_RING_ALLOC_TX: {
6448                 struct bnxt_tx_ring_info *txr;
6449
6450                 txr = container_of(ring, struct bnxt_tx_ring_info,
6451                                    tx_ring_struct);
6452                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6453                 /* Association of transmit ring with completion ring */
6454                 grp_info = &bp->grp_info[ring->grp_idx];
6455                 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6456                 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6457                 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6458                 req->queue_id = cpu_to_le16(ring->queue_id);
6459                 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6460                         req->cmpl_coal_cnt =
6461                                 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
6462                 break;
6463         }
6464         case HWRM_RING_ALLOC_RX:
6465                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6466                 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
6467                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6468                         u16 flags = 0;
6469
6470                         /* Association of rx ring with stats context */
6471                         grp_info = &bp->grp_info[ring->grp_idx];
6472                         req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
6473                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6474                         req->enables |= cpu_to_le32(
6475                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6476                         if (NET_IP_ALIGN == 2)
6477                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
6478                         req->flags = cpu_to_le16(flags);
6479                 }
6480                 break;
6481         case HWRM_RING_ALLOC_AGG:
6482                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6483                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
6484                         /* Association of agg ring with rx ring */
6485                         grp_info = &bp->grp_info[ring->grp_idx];
6486                         req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
6487                         req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
6488                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6489                         req->enables |= cpu_to_le32(
6490                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
6491                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6492                 } else {
6493                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6494                 }
6495                 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
6496                 break;
6497         case HWRM_RING_ALLOC_CMPL:
6498                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
6499                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6500                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6501                         /* Association of cp ring with nq */
6502                         grp_info = &bp->grp_info[map_index];
6503                         req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
6504                         req->cq_handle = cpu_to_le64(ring->handle);
6505                         req->enables |= cpu_to_le32(
6506                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
6507                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
6508                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6509                 }
6510                 break;
6511         case HWRM_RING_ALLOC_NQ:
6512                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
6513                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6514                 if (bp->flags & BNXT_FLAG_USING_MSIX)
6515                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6516                 break;
6517         default:
6518                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
6519                            ring_type);
6520                 return -1;
6521         }
6522
6523         resp = hwrm_req_hold(bp, req);
6524         rc = hwrm_req_send(bp, req);
6525         err = le16_to_cpu(resp->error_code);
6526         ring_id = le16_to_cpu(resp->ring_id);
6527         hwrm_req_drop(bp, req);
6528
6529 exit:
6530         if (rc || err) {
6531                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6532                            ring_type, rc, err);
6533                 return -EIO;
6534         }
6535         ring->fw_ring_id = ring_id;
6536         return rc;
6537 }
6538
6539 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
6540 {
6541         int rc;
6542
6543         if (BNXT_PF(bp)) {
6544                 struct hwrm_func_cfg_input *req;
6545
6546                 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6547                 if (rc)
6548                         return rc;
6549
6550                 req->fid = cpu_to_le16(0xffff);
6551                 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6552                 req->async_event_cr = cpu_to_le16(idx);
6553                 return hwrm_req_send(bp, req);
6554         } else {
6555                 struct hwrm_func_vf_cfg_input *req;
6556
6557                 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6558                 if (rc)
6559                         return rc;
6560
6561                 req->enables =
6562                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6563                 req->async_event_cr = cpu_to_le16(idx);
6564                 return hwrm_req_send(bp, req);
6565         }
6566 }
6567
6568 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
6569                              u32 ring_type)
6570 {
6571         switch (ring_type) {
6572         case HWRM_RING_ALLOC_TX:
6573                 db->db_ring_mask = bp->tx_ring_mask;
6574                 break;
6575         case HWRM_RING_ALLOC_RX:
6576                 db->db_ring_mask = bp->rx_ring_mask;
6577                 break;
6578         case HWRM_RING_ALLOC_AGG:
6579                 db->db_ring_mask = bp->rx_agg_ring_mask;
6580                 break;
6581         case HWRM_RING_ALLOC_CMPL:
6582         case HWRM_RING_ALLOC_NQ:
6583                 db->db_ring_mask = bp->cp_ring_mask;
6584                 break;
6585         }
6586         if (bp->flags & BNXT_FLAG_CHIP_P7) {
6587                 db->db_epoch_mask = db->db_ring_mask + 1;
6588                 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
6589         }
6590 }
6591
6592 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
6593                         u32 map_idx, u32 xid)
6594 {
6595         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6596                 switch (ring_type) {
6597                 case HWRM_RING_ALLOC_TX:
6598                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
6599                         break;
6600                 case HWRM_RING_ALLOC_RX:
6601                 case HWRM_RING_ALLOC_AGG:
6602                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
6603                         break;
6604                 case HWRM_RING_ALLOC_CMPL:
6605                         db->db_key64 = DBR_PATH_L2;
6606                         break;
6607                 case HWRM_RING_ALLOC_NQ:
6608                         db->db_key64 = DBR_PATH_L2;
6609                         break;
6610                 }
6611                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
6612
6613                 if (bp->flags & BNXT_FLAG_CHIP_P7)
6614                         db->db_key64 |= DBR_VALID;
6615
6616                 db->doorbell = bp->bar1 + bp->db_offset;
6617         } else {
6618                 db->doorbell = bp->bar1 + map_idx * 0x80;
6619                 switch (ring_type) {
6620                 case HWRM_RING_ALLOC_TX:
6621                         db->db_key32 = DB_KEY_TX;
6622                         break;
6623                 case HWRM_RING_ALLOC_RX:
6624                 case HWRM_RING_ALLOC_AGG:
6625                         db->db_key32 = DB_KEY_RX;
6626                         break;
6627                 case HWRM_RING_ALLOC_CMPL:
6628                         db->db_key32 = DB_KEY_CP;
6629                         break;
6630                 }
6631         }
6632         bnxt_set_db_mask(bp, db, ring_type);
6633 }
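
/* Example of the 64-bit doorbell key composed above, assuming DBR_XID_SFT
 * is 32: a P5+ TX ring with fw ring id 0x12 gets
 * db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ | (0x12ULL << 32), and the ring's
 * current producer index is OR'ed into the low bits on each doorbell
 * write.
 */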
6634
6635 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
6636 {
6637         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
6638         int i, rc = 0;
6639         u32 type;
6640
6641         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6642                 type = HWRM_RING_ALLOC_NQ;
6643         else
6644                 type = HWRM_RING_ALLOC_CMPL;
6645         for (i = 0; i < bp->cp_nr_rings; i++) {
6646                 struct bnxt_napi *bnapi = bp->bnapi[i];
6647                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6648                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
6649                 u32 map_idx = ring->map_idx;
6650                 unsigned int vector;
6651
6652                 vector = bp->irq_tbl[map_idx].vector;
6653                 disable_irq_nosync(vector);
6654                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6655                 if (rc) {
6656                         enable_irq(vector);
6657                         goto err_out;
6658                 }
6659                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
6660                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
6661                 enable_irq(vector);
6662                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
6663
6664                 if (!i) {
6665                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6666                         if (rc)
6667                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
6668                 }
6669         }
6670
6671         type = HWRM_RING_ALLOC_TX;
6672         for (i = 0; i < bp->tx_nr_rings; i++) {
6673                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6674                 struct bnxt_ring_struct *ring;
6675                 u32 map_idx;
6676
6677                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6678                         struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
6679                         struct bnxt_napi *bnapi = txr->bnapi;
6680                         u32 type2 = HWRM_RING_ALLOC_CMPL;
6681
6682                         ring = &cpr2->cp_ring_struct;
6683                         ring->handle = BNXT_SET_NQ_HDL(cpr2);
6684                         map_idx = bnapi->index;
6685                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6686                         if (rc)
6687                                 goto err_out;
6688                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6689                                     ring->fw_ring_id);
6690                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6691                 }
6692                 ring = &txr->tx_ring_struct;
6693                 map_idx = i;
6694                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6695                 if (rc)
6696                         goto err_out;
6697                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
6698         }
6699
6700         type = HWRM_RING_ALLOC_RX;
6701         for (i = 0; i < bp->rx_nr_rings; i++) {
6702                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6703                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6704                 struct bnxt_napi *bnapi = rxr->bnapi;
6705                 u32 map_idx = bnapi->index;
6706
6707                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6708                 if (rc)
6709                         goto err_out;
6710                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6711                 /* If we have agg rings, post agg buffers first before writing the rx doorbell. */
6712                 if (!agg_rings)
6713                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6714                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
6715                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6716                         struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
6717                         u32 type2 = HWRM_RING_ALLOC_CMPL;
6718
6719                         ring = &cpr2->cp_ring_struct;
6720                         ring->handle = BNXT_SET_NQ_HDL(cpr2);
6721                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6722                         if (rc)
6723                                 goto err_out;
6724                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6725                                     ring->fw_ring_id);
6726                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6727                 }
6728         }
6729
6730         if (agg_rings) {
6731                 type = HWRM_RING_ALLOC_AGG;
6732                 for (i = 0; i < bp->rx_nr_rings; i++) {
6733                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6734                         struct bnxt_ring_struct *ring =
6735                                                 &rxr->rx_agg_ring_struct;
6736                         u32 grp_idx = ring->grp_idx;
6737                         u32 map_idx = grp_idx + bp->rx_nr_rings;
6738
6739                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6740                         if (rc)
6741                                 goto err_out;
6742
6743                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6744                                     ring->fw_ring_id);
6745                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6746                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6747                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
6748                 }
6749         }
6750 err_out:
6751         return rc;
6752 }
6753
6754 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6755                                    struct bnxt_ring_struct *ring,
6756                                    u32 ring_type, int cmpl_ring_id)
6757 {
6758         struct hwrm_ring_free_output *resp;
6759         struct hwrm_ring_free_input *req;
6760         u16 error_code = 0;
6761         int rc;
6762
6763         if (BNXT_NO_FW_ACCESS(bp))
6764                 return 0;
6765
6766         rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6767         if (rc)
6768                 goto exit;
6769
6770         req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6771         req->ring_type = ring_type;
6772         req->ring_id = cpu_to_le16(ring->fw_ring_id);
6773
6774         resp = hwrm_req_hold(bp, req);
6775         rc = hwrm_req_send(bp, req);
6776         error_code = le16_to_cpu(resp->error_code);
6777         hwrm_req_drop(bp, req);
6778 exit:
6779         if (rc || error_code) {
6780                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6781                            ring_type, rc, error_code);
6782                 return -EIO;
6783         }
6784         return 0;
6785 }
6786
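/* Free all TX, RX, AGG and completion/NQ rings in firmware.  When
 * close_path is true, each TX/RX/AGG free request is tied to the
 * ring's completion ring so the completion path is flushed; otherwise
 * INVALID_HW_RING_ID is passed.  Interrupts are disabled before the
 * completion rings themselves are freed because the IRQ doorbells
 * stop working once they are gone.
 */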
6787 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6788 {
6789         u32 type;
6790         int i;
6791
6792         if (!bp->bnapi)
6793                 return;
6794
6795         for (i = 0; i < bp->tx_nr_rings; i++) {
6796                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6797                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6798
6799                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6800                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6801
6802                         hwrm_ring_free_send_msg(bp, ring,
6803                                                 RING_FREE_REQ_RING_TYPE_TX,
6804                                                 close_path ? cmpl_ring_id :
6805                                                 INVALID_HW_RING_ID);
6806                         ring->fw_ring_id = INVALID_HW_RING_ID;
6807                 }
6808         }
6809
6810         for (i = 0; i < bp->rx_nr_rings; i++) {
6811                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6812                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6813                 u32 grp_idx = rxr->bnapi->index;
6814
6815                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6816                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6817
6818                         hwrm_ring_free_send_msg(bp, ring,
6819                                                 RING_FREE_REQ_RING_TYPE_RX,
6820                                                 close_path ? cmpl_ring_id :
6821                                                 INVALID_HW_RING_ID);
6822                         ring->fw_ring_id = INVALID_HW_RING_ID;
6823                         bp->grp_info[grp_idx].rx_fw_ring_id =
6824                                 INVALID_HW_RING_ID;
6825                 }
6826         }
6827
6828         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6829                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6830         else
6831                 type = RING_FREE_REQ_RING_TYPE_RX;
6832         for (i = 0; i < bp->rx_nr_rings; i++) {
6833                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6834                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6835                 u32 grp_idx = rxr->bnapi->index;
6836
6837                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6838                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6839
6840                         hwrm_ring_free_send_msg(bp, ring, type,
6841                                                 close_path ? cmpl_ring_id :
6842                                                 INVALID_HW_RING_ID);
6843                         ring->fw_ring_id = INVALID_HW_RING_ID;
6844                         bp->grp_info[grp_idx].agg_fw_ring_id =
6845                                 INVALID_HW_RING_ID;
6846                 }
6847         }
6848
6849         /* The completion rings are about to be freed.  Once they are
6850          * freed, the IRQ doorbells will no longer work, so disable
6851          * interrupts now.
6852          */
6853         bnxt_disable_int_sync(bp);
6854
6855         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6856                 type = RING_FREE_REQ_RING_TYPE_NQ;
6857         else
6858                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6859         for (i = 0; i < bp->cp_nr_rings; i++) {
6860                 struct bnxt_napi *bnapi = bp->bnapi[i];
6861                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6862                 struct bnxt_ring_struct *ring;
6863                 int j;
6864
6865                 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
6866                         struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
6867
6868                         ring = &cpr2->cp_ring_struct;
6869                         if (ring->fw_ring_id == INVALID_HW_RING_ID)
6870                                 continue;
6871                         hwrm_ring_free_send_msg(bp, ring,
6872                                                 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6873                                                 INVALID_HW_RING_ID);
6874                         ring->fw_ring_id = INVALID_HW_RING_ID;
6875                 }
6876                 ring = &cpr->cp_ring_struct;
6877                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6878                         hwrm_ring_free_send_msg(bp, ring, type,
6879                                                 INVALID_HW_RING_ID);
6880                         ring->fw_ring_id = INVALID_HW_RING_ID;
6881                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6882                 }
6883         }
6884 }
6885
6886 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6887                              bool shared);
6888 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6889                            bool shared);
6890
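/* Query firmware with HWRM_FUNC_QCFG and cache the currently reserved
 * resource counts in bp->hw_resc.  On P5_PLUS chips, if fewer
 * completion rings than RX + TX rings were allocated, the cached
 * RX/TX counts are trimmed down to fit the available completion
 * rings.
 */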
6891 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6892 {
6893         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6894         struct hwrm_func_qcfg_output *resp;
6895         struct hwrm_func_qcfg_input *req;
6896         int rc;
6897
6898         if (bp->hwrm_spec_code < 0x10601)
6899                 return 0;
6900
6901         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6902         if (rc)
6903                 return rc;
6904
6905         req->fid = cpu_to_le16(0xffff);
6906         resp = hwrm_req_hold(bp, req);
6907         rc = hwrm_req_send(bp, req);
6908         if (rc) {
6909                 hwrm_req_drop(bp, req);
6910                 return rc;
6911         }
6912
6913         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6914         if (BNXT_NEW_RM(bp)) {
6915                 u16 cp, stats;
6916
6917                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6918                 hw_resc->resv_hw_ring_grps =
6919                         le32_to_cpu(resp->alloc_hw_ring_grps);
6920                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6921                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6922                 stats = le16_to_cpu(resp->alloc_stat_ctx);
6923                 hw_resc->resv_irqs = cp;
6924                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6925                         int rx = hw_resc->resv_rx_rings;
6926                         int tx = hw_resc->resv_tx_rings;
6927
6928                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6929                                 rx >>= 1;
6930                         if (cp < (rx + tx)) {
6931                                 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
6932                                 if (rc)
6933                                         goto get_rings_exit;
6934                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6935                                         rx <<= 1;
6936                                 hw_resc->resv_rx_rings = rx;
6937                                 hw_resc->resv_tx_rings = tx;
6938                         }
6939                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6940                         hw_resc->resv_hw_ring_grps = rx;
6941                 }
6942                 hw_resc->resv_cp_rings = cp;
6943                 hw_resc->resv_stat_ctxs = stats;
6944         }
6945 get_rings_exit:
6946         hwrm_req_drop(bp, req);
6947         return rc;
6948 }
6949
6950 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6951 {
6952         struct hwrm_func_qcfg_output *resp;
6953         struct hwrm_func_qcfg_input *req;
6954         int rc;
6955
6956         if (bp->hwrm_spec_code < 0x10601)
6957                 return 0;
6958
6959         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6960         if (rc)
6961                 return rc;
6962
6963         req->fid = cpu_to_le16(fid);
6964         resp = hwrm_req_hold(bp, req);
6965         rc = hwrm_req_send(bp, req);
6966         if (!rc)
6967                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6968
6969         hwrm_req_drop(bp, req);
6970         return rc;
6971 }
6972
6973 static bool bnxt_rfs_supported(struct bnxt *bp);
6974
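/* Build (but do not send) a HWRM_FUNC_CFG request reserving the given
 * resource counts for the PF.  Each non-zero count sets the matching
 * enables bit, so resources requested as zero are left untouched by
 * firmware.  Returns NULL if the request cannot be initialized.
 */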
6975 static struct hwrm_func_cfg_input *
6976 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6977                              int ring_grps, int cp_rings, int stats, int vnics)
6978 {
6979         struct hwrm_func_cfg_input *req;
6980         u32 enables = 0;
6981
6982         if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
6983                 return NULL;
6984
6985         req->fid = cpu_to_le16(0xffff);
6986         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6987         req->num_tx_rings = cpu_to_le16(tx_rings);
6988         if (BNXT_NEW_RM(bp)) {
6989                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6990                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6991                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6992                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6993                         enables |= tx_rings + ring_grps ?
6994                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6995                         enables |= rx_rings ?
6996                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6997                 } else {
6998                         enables |= cp_rings ?
6999                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7000                         enables |= ring_grps ?
7001                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
7002                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7003                 }
7004                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7005
7006                 req->num_rx_rings = cpu_to_le16(rx_rings);
7007                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7008                         u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
7009
7010                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
7011                         req->num_msix = cpu_to_le16(cp_rings);
7012                         req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
7013                 } else {
7014                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
7015                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
7016                         req->num_rsscos_ctxs = cpu_to_le16(1);
7017                         if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
7018                             bnxt_rfs_supported(bp))
7019                                 req->num_rsscos_ctxs =
7020                                         cpu_to_le16(ring_grps + 1);
7021                 }
7022                 req->num_stat_ctxs = cpu_to_le16(stats);
7023                 req->num_vnics = cpu_to_le16(vnics);
7024         }
7025         req->enables = cpu_to_le32(enables);
7026         return req;
7027 }
7028
7029 static struct hwrm_func_vf_cfg_input *
7030 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
7031                              int ring_grps, int cp_rings, int stats, int vnics)
7032 {
7033         struct hwrm_func_vf_cfg_input *req;
7034         u32 enables = 0;
7035
7036         if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7037                 return NULL;
7038
7039         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7040         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7041                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7042         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7043         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7044                 enables |= tx_rings + ring_grps ?
7045                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7046         } else {
7047                 enables |= cp_rings ?
7048                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7049                 enables |= ring_grps ?
7050                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7051         }
7052         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7053         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7054
7055         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7056         req->num_tx_rings = cpu_to_le16(tx_rings);
7057         req->num_rx_rings = cpu_to_le16(rx_rings);
7058         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7059                 u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
7060
7061                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
7062                 req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
7063         } else {
7064                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
7065                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
7066                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
7067         }
7068         req->num_stat_ctxs = cpu_to_le16(stats);
7069         req->num_vnics = cpu_to_le16(vnics);
7070
7071         req->enables = cpu_to_le32(enables);
7072         return req;
7073 }
7074
7075 static int
7076 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
7077                            int ring_grps, int cp_rings, int stats, int vnics)
7078 {
7079         struct hwrm_func_cfg_input *req;
7080         int rc;
7081
7082         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
7083                                            cp_rings, stats, vnics);
7084         if (!req)
7085                 return -ENOMEM;
7086
7087         if (!req->enables) {
7088                 hwrm_req_drop(bp, req);
7089                 return 0;
7090         }
7091
7092         rc = hwrm_req_send(bp, req);
7093         if (rc)
7094                 return rc;
7095
7096         if (bp->hwrm_spec_code < 0x10601)
7097                 bp->hw_resc.resv_tx_rings = tx_rings;
7098
7099         return bnxt_hwrm_get_rings(bp);
7100 }
7101
7102 static int
7103 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
7104                            int ring_grps, int cp_rings, int stats, int vnics)
7105 {
7106         struct hwrm_func_vf_cfg_input *req;
7107         int rc;
7108
7109         if (!BNXT_NEW_RM(bp)) {
7110                 bp->hw_resc.resv_tx_rings = tx_rings;
7111                 return 0;
7112         }
7113
7114         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
7115                                            cp_rings, stats, vnics);
7116         if (!req)
7117                 return -ENOMEM;
7118
7119         rc = hwrm_req_send(bp, req);
7120         if (rc)
7121                 return rc;
7122
7123         return bnxt_hwrm_get_rings(bp);
7124 }
7125
7126 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
7127                                    int cp, int stat, int vnic)
7128 {
7129         if (BNXT_PF(bp))
7130                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
7131                                                   vnic);
7132         else
7133                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
7134                                                   vnic);
7135 }
7136
7137 int bnxt_nq_rings_in_use(struct bnxt *bp)
7138 {
7139         int cp = bp->cp_nr_rings;
7140         int ulp_msix, ulp_base;
7141
7142         ulp_msix = bnxt_get_ulp_msix_num(bp);
7143         if (ulp_msix) {
7144                 ulp_base = bnxt_get_ulp_msix_base(bp);
7145                 cp += ulp_msix;
7146                 if ((ulp_base + ulp_msix) > cp)
7147                         cp = ulp_base + ulp_msix;
7148         }
7149         return cp;
7150 }
7151
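/* On P5_PLUS chips every RX and TX ring has its own completion ring,
 * so rx + tx completion rings are in use.  On older chips the
 * completion ring count is the same as the NQ/MSI-X count, so
 * bnxt_nq_rings_in_use() applies.
 */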
7152 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7153 {
7154         int cp;
7155
7156         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7157                 return bnxt_nq_rings_in_use(bp);
7158
7159         cp = bp->tx_nr_rings + bp->rx_nr_rings;
7160         return cp;
7161 }
7162
7163 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7164 {
7165         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
7166         int cp = bp->cp_nr_rings;
7167
7168         if (!ulp_stat)
7169                 return cp;
7170
7171         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
7172                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
7173
7174         return cp + ulp_stat;
7175 }
7176
7177 /* Check if a default RSS map needs to be set up.  This function is only
7178  * used on older firmware that does not require reserving RX rings.
7179  */
7180 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7181 {
7182         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7183
7184         /* The current RSS map was set up for resv_rx_rings RX rings */
7185         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7186                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7187                 if (!netif_is_rxfh_configured(bp->dev))
7188                         bnxt_set_dflt_rss_indir_tbl(bp);
7189         }
7190 }
7191
7192 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7193 {
7194         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7195         int cp = bnxt_cp_rings_in_use(bp);
7196         int nq = bnxt_nq_rings_in_use(bp);
7197         int rx = bp->rx_nr_rings, stat;
7198         int vnic = 1, grp = rx;
7199
7200         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7201             bp->hwrm_spec_code >= 0x10601)
7202                 return true;
7203
7204         /* Old firmware does not need RX ring reservations but we still
7205          * need to set up a default RSS map when needed.  With new firmware
7206          * we go through RX ring reservations first and then set up the
7207          * RSS map for the successfully reserved RX rings when needed.
7208          */
7209         if (!BNXT_NEW_RM(bp)) {
7210                 bnxt_check_rss_tbl_no_rmgr(bp);
7211                 return false;
7212         }
7213         if ((bp->flags & BNXT_FLAG_RFS) &&
7214             !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7215                 vnic = rx + 1;
7216         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7217                 rx <<= 1;
7218         stat = bnxt_get_func_stat_ctxs(bp);
7219         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7220             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7221             (hw_resc->resv_hw_ring_grps != grp &&
7222              !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7223                 return true;
7224         if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7225             hw_resc->resv_irqs != nq)
7226                 return true;
7227         return false;
7228 }
7229
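/* Reserve rings with firmware, then shrink the driver's ring counts
 * to what was actually granted.  If fewer than 2 RX rings remain with
 * aggregation enabled, aggregation rings (and LRO) are turned off
 * rather than failing, unless the interface is already running.
 */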
7230 static int __bnxt_reserve_rings(struct bnxt *bp)
7231 {
7232         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7233         int cp = bnxt_nq_rings_in_use(bp);
7234         int tx = bp->tx_nr_rings;
7235         int rx = bp->rx_nr_rings;
7236         int grp, rx_rings, rc;
7237         int vnic = 1, stat;
7238         bool sh = false;
7239         int tx_cp;
7240
7241         if (!bnxt_need_reserve_rings(bp))
7242                 return 0;
7243
7244         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7245                 sh = true;
7246         if ((bp->flags & BNXT_FLAG_RFS) &&
7247             !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7248                 vnic = rx + 1;
7249         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7250                 rx <<= 1;
7251         grp = bp->rx_nr_rings;
7252         stat = bnxt_get_func_stat_ctxs(bp);
7253
7254         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
7255         if (rc)
7256                 return rc;
7257
7258         tx = hw_resc->resv_tx_rings;
7259         if (BNXT_NEW_RM(bp)) {
7260                 rx = hw_resc->resv_rx_rings;
7261                 cp = hw_resc->resv_irqs;
7262                 grp = hw_resc->resv_hw_ring_grps;
7263                 vnic = hw_resc->resv_vnics;
7264                 stat = hw_resc->resv_stat_ctxs;
7265         }
7266
7267         rx_rings = rx;
7268         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7269                 if (rx >= 2) {
7270                         rx_rings = rx >> 1;
7271                 } else {
7272                         if (netif_running(bp->dev))
7273                                 return -ENOMEM;
7274
7275                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7276                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7277                         bp->dev->hw_features &= ~NETIF_F_LRO;
7278                         bp->dev->features &= ~NETIF_F_LRO;
7279                         bnxt_set_ring_params(bp);
7280                 }
7281         }
7282         rx_rings = min_t(int, rx_rings, grp);
7283         cp = min_t(int, cp, bp->cp_nr_rings);
7284         if (stat > bnxt_get_ulp_stat_ctxs(bp))
7285                 stat -= bnxt_get_ulp_stat_ctxs(bp);
7286         cp = min_t(int, cp, stat);
7287         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
7288         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7289                 rx = rx_rings << 1;
7290         tx_cp = bnxt_num_tx_to_cp(bp, tx);
7291         cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7292         bp->tx_nr_rings = tx;
7293
7294         /* If we cannot reserve all the RX rings, reset the RSS map only
7295          * if absolutely necessary
7296          */
7297         if (rx_rings != bp->rx_nr_rings) {
7298                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7299                             rx_rings, bp->rx_nr_rings);
7300                 if (netif_is_rxfh_configured(bp->dev) &&
7301                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7302                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7303                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7304                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7305                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7306                 }
7307         }
7308         bp->rx_nr_rings = rx_rings;
7309         bp->cp_nr_rings = cp;
7310
7311         if (!tx || !rx || !cp || !grp || !vnic || !stat)
7312                 return -ENOMEM;
7313
7314         if (!netif_is_rxfh_configured(bp->dev))
7315                 bnxt_set_dflt_rss_indir_tbl(bp);
7316
7317         return rc;
7318 }
7319
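/* Dry-run resource check: ask firmware whether the requested counts
 * would fit, without reserving anything.  The *_ASSETS_TEST flags
 * make firmware validate the HWRM_FUNC_VF_CFG request instead of
 * applying it.
 */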
7320 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
7321                                     int ring_grps, int cp_rings, int stats,
7322                                     int vnics)
7323 {
7324         struct hwrm_func_vf_cfg_input *req;
7325         u32 flags;
7326
7327         if (!BNXT_NEW_RM(bp))
7328                 return 0;
7329
7330         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
7331                                            cp_rings, stats, vnics);
7332         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7333                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7334                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7335                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7336                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7337                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7338         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7339                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7340
7341         req->flags = cpu_to_le32(flags);
7342         return hwrm_req_send_silent(bp, req);
7343 }
7344
7345 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
7346                                     int ring_grps, int cp_rings, int stats,
7347                                     int vnics)
7348 {
7349         struct hwrm_func_cfg_input *req;
7350         u32 flags;
7351
7352         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
7353                                            cp_rings, stats, vnics);
7354         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7355         if (BNXT_NEW_RM(bp)) {
7356                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7357                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7358                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7359                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7360                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7361                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7362                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7363                 else
7364                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7365         }
7366
7367         req->flags = cpu_to_le32(flags);
7368         return hwrm_req_send_silent(bp, req);
7369 }
7370
7371 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
7372                                  int ring_grps, int cp_rings, int stats,
7373                                  int vnics)
7374 {
7375         if (bp->hwrm_spec_code < 0x10801)
7376                 return 0;
7377
7378         if (BNXT_PF(bp))
7379                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
7380                                                 ring_grps, cp_rings, stats,
7381                                                 vnics);
7382
7383         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
7384                                         cp_rings, stats, vnics);
7385 }
7386
7387 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
7388 {
7389         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7390         struct hwrm_ring_aggint_qcaps_output *resp;
7391         struct hwrm_ring_aggint_qcaps_input *req;
7392         int rc;
7393
7394         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
7395         coal_cap->num_cmpl_dma_aggr_max = 63;
7396         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
7397         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
7398         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
7399         coal_cap->int_lat_tmr_min_max = 65535;
7400         coal_cap->int_lat_tmr_max_max = 65535;
7401         coal_cap->num_cmpl_aggr_int_max = 65535;
7402         coal_cap->timer_units = 80;
7403
7404         if (bp->hwrm_spec_code < 0x10902)
7405                 return;
7406
7407         if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
7408                 return;
7409
7410         resp = hwrm_req_hold(bp, req);
7411         rc = hwrm_req_send_silent(bp, req);
7412         if (!rc) {
7413                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
7414                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
7415                 coal_cap->num_cmpl_dma_aggr_max =
7416                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
7417                 coal_cap->num_cmpl_dma_aggr_during_int_max =
7418                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
7419                 coal_cap->cmpl_aggr_dma_tmr_max =
7420                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
7421                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
7422                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
7423                 coal_cap->int_lat_tmr_min_max =
7424                         le16_to_cpu(resp->int_lat_tmr_min_max);
7425                 coal_cap->int_lat_tmr_max_max =
7426                         le16_to_cpu(resp->int_lat_tmr_max_max);
7427                 coal_cap->num_cmpl_aggr_int_max =
7428                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
7429                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
7430         }
7431         hwrm_req_drop(bp, req);
7432 }
7433
7434 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
7435 {
7436         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7437
7438         return usec * 1000 / coal_cap->timer_units;
7439 }
7440
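/* A worked example of the timer conversion above, assuming the
 * legacy default of 80 nsec timer units: a 12 usec coalescing
 * interval becomes 12 * 1000 / 80 = 150 timer units; the min timer
 * below is then 150 / 2 = 75 units and the buf timer 150 / 4 = 37
 * units.
 */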
7441 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
7442         struct bnxt_coal *hw_coal,
7443         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7444 {
7445         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7446         u16 val, tmr, max, flags = hw_coal->flags;
7447         u32 cmpl_params = coal_cap->cmpl_params;
7448
7449         max = hw_coal->bufs_per_record * 128;
7450         if (hw_coal->budget)
7451                 max = hw_coal->bufs_per_record * hw_coal->budget;
7452         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
7453
7454         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
7455         req->num_cmpl_aggr_int = cpu_to_le16(val);
7456
7457         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
7458         req->num_cmpl_dma_aggr = cpu_to_le16(val);
7459
7460         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
7461                       coal_cap->num_cmpl_dma_aggr_during_int_max);
7462         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
7463
7464         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
7465         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
7466         req->int_lat_tmr_max = cpu_to_le16(tmr);
7467
7468         /* min timer set to 1/2 of interrupt timer */
7469         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
7470                 val = tmr / 2;
7471                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
7472                 req->int_lat_tmr_min = cpu_to_le16(val);
7473                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7474         }
7475
7476         /* buf timer set to 1/4 of interrupt timer */
7477         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
7478         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
7479
7480         if (cmpl_params &
7481             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
7482                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
7483                 val = clamp_t(u16, tmr, 1,
7484                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
7485                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
7486                 req->enables |=
7487                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
7488         }
7489
7490         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
7491             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
7492                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
7493         req->flags = cpu_to_le16(flags);
7494         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
7495 }
7496
7497 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
7498                                    struct bnxt_coal *hw_coal)
7499 {
7500         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
7501         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7502         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7503         u32 nq_params = coal_cap->nq_params;
7504         u16 tmr;
7505         int rc;
7506
7507         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
7508                 return 0;
7509
7510         rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7511         if (rc)
7512                 return rc;
7513
7514         req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
7515         req->flags =
7516                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
7517
7518         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
7519         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
7520         req->int_lat_tmr_min = cpu_to_le16(tmr);
7521         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7522         return hwrm_req_send(bp, req);
7523 }
7524
7525 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
7526 {
7527         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
7528         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7529         struct bnxt_coal coal;
7530         int rc;
7531
7532         /* Tick values in microseconds.
7533          * 1 coal_buf x bufs_per_record = 1 completion record.
7534          */
7535         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
7536
7537         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
7538         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
7539
7540         if (!bnapi->rx_ring)
7541                 return -ENODEV;
7542
7543         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7544         if (rc)
7545                 return rc;
7546
7547         bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
7548
7549         req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
7550
7551         return hwrm_req_send(bp, req_rx);
7552 }
7553
7554 static int
7555 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7556                       struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7557 {
7558         u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
7559
7560         req->ring_id = cpu_to_le16(ring_id);
7561         return hwrm_req_send(bp, req);
7562 }
7563
7564 static int
7565 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7566                       struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7567 {
7568         struct bnxt_tx_ring_info *txr;
7569         int i, rc;
7570
7571         bnxt_for_each_napi_tx(i, bnapi, txr) {
7572                 u16 ring_id;
7573
7574                 ring_id = bnxt_cp_ring_for_tx(bp, txr);
7575                 req->ring_id = cpu_to_le16(ring_id);
7576                 rc = hwrm_req_send(bp, req);
7577                 if (rc)
7578                         return rc;
7579                 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7580                         return 0;
7581         }
7582         return 0;
7583 }
7584
7585 int bnxt_hwrm_set_coal(struct bnxt *bp)
7586 {
7587         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
7588         int i, rc;
7589
7590         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7591         if (rc)
7592                 return rc;
7593
7594         rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7595         if (rc) {
7596                 hwrm_req_drop(bp, req_rx);
7597                 return rc;
7598         }
7599
7600         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
7601         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
7602
7603         hwrm_req_hold(bp, req_rx);
7604         hwrm_req_hold(bp, req_tx);
7605         for (i = 0; i < bp->cp_nr_rings; i++) {
7606                 struct bnxt_napi *bnapi = bp->bnapi[i];
7607                 struct bnxt_coal *hw_coal;
7608
7609                 if (!bnapi->rx_ring)
7610                         rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7611                 else
7612                         rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
7613                 if (rc)
7614                         break;
7615
7616                 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7617                         continue;
7618
7619                 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
7620                         rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7621                         if (rc)
7622                                 break;
7623                 }
7624                 if (bnapi->rx_ring)
7625                         hw_coal = &bp->rx_coal;
7626                 else
7627                         hw_coal = &bp->tx_coal;
7628                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
7629         }
7630         hwrm_req_drop(bp, req_rx);
7631         hwrm_req_drop(bp, req_tx);
7632         return rc;
7633 }
7634
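/* Free all statistics contexts.  On firmware with major version 20
 * or older, each context is first cleared with HWRM_STAT_CTX_CLR_STATS,
 * presumably because that firmware does not zero the counters when a
 * context is freed.
 */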
7635 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
7636 {
7637         struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
7638         struct hwrm_stat_ctx_free_input *req;
7639         int i;
7640
7641         if (!bp->bnapi)
7642                 return;
7643
7644         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7645                 return;
7646
7647         if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
7648                 return;
7649         if (BNXT_FW_MAJ(bp) <= 20) {
7650                 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
7651                         hwrm_req_drop(bp, req);
7652                         return;
7653                 }
7654                 hwrm_req_hold(bp, req0);
7655         }
7656         hwrm_req_hold(bp, req);
7657         for (i = 0; i < bp->cp_nr_rings; i++) {
7658                 struct bnxt_napi *bnapi = bp->bnapi[i];
7659                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7660
7661                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
7662                         req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
7663                         if (req0) {
7664                                 req0->stat_ctx_id = req->stat_ctx_id;
7665                                 hwrm_req_send(bp, req0);
7666                         }
7667                         hwrm_req_send(bp, req);
7668
7669                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
7670                 }
7671         }
7672         hwrm_req_drop(bp, req);
7673         if (req0)
7674                 hwrm_req_drop(bp, req0);
7675 }
7676
7677 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
7678 {
7679         struct hwrm_stat_ctx_alloc_output *resp;
7680         struct hwrm_stat_ctx_alloc_input *req;
7681         int rc, i;
7682
7683         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7684                 return 0;
7685
7686         rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
7687         if (rc)
7688                 return rc;
7689
7690         req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
7691         req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
7692
7693         resp = hwrm_req_hold(bp, req);
7694         for (i = 0; i < bp->cp_nr_rings; i++) {
7695                 struct bnxt_napi *bnapi = bp->bnapi[i];
7696                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7697
7698                 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
7699
7700                 rc = hwrm_req_send(bp, req);
7701                 if (rc)
7702                         break;
7703
7704                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
7705
7706                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
7707         }
7708         hwrm_req_drop(bp, req);
7709         return rc;
7710 }
7711
7712 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
7713 {
7714         struct hwrm_func_qcfg_output *resp;
7715         struct hwrm_func_qcfg_input *req;
7716         u16 flags;
7717         int rc;
7718
7719         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7720         if (rc)
7721                 return rc;
7722
7723         req->fid = cpu_to_le16(0xffff);
7724         resp = hwrm_req_hold(bp, req);
7725         rc = hwrm_req_send(bp, req);
7726         if (rc)
7727                 goto func_qcfg_exit;
7728
7729 #ifdef CONFIG_BNXT_SRIOV
7730         if (BNXT_VF(bp)) {
7731                 struct bnxt_vf_info *vf = &bp->vf;
7732
7733                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
7734         } else {
7735                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
7736         }
7737 #endif
7738         flags = le16_to_cpu(resp->flags);
7739         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
7740                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
7741                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
7742                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
7743                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
7744         }
7745         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
7746                 bp->flags |= BNXT_FLAG_MULTI_HOST;
7747
7748         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
7749                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
7750
7751         switch (resp->port_partition_type) {
7752         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
7753         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
7754         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
7755                 bp->port_partition_type = resp->port_partition_type;
7756                 break;
7757         }
7758         if (bp->hwrm_spec_code < 0x10707 ||
7759             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
7760                 bp->br_mode = BRIDGE_MODE_VEB;
7761         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
7762                 bp->br_mode = BRIDGE_MODE_VEPA;
7763         else
7764                 bp->br_mode = BRIDGE_MODE_UNDEF;
7765
7766         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
7767         if (!bp->max_mtu)
7768                 bp->max_mtu = BNXT_MAX_MTU;
7769
7770         if (bp->db_size)
7771                 goto func_qcfg_exit;
7772
7773         bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
7774         if (BNXT_CHIP_P5(bp)) {
7775                 if (BNXT_PF(bp))
7776                         bp->db_offset = DB_PF_OFFSET_P5;
7777                 else
7778                         bp->db_offset = DB_VF_OFFSET_P5;
7779         }
7780         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
7781                                  1024);
7782         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
7783             bp->db_size <= bp->db_offset)
7784                 bp->db_size = pci_resource_len(bp->pdev, 2);
7785
7786 func_qcfg_exit:
7787         hwrm_req_drop(bp, req);
7788         return rc;
7789 }
7790
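/* Record how firmware wants a context type initialized.  The init
 * offset is reported in 4-byte units and converted to bytes here.  If
 * firmware did not report a valid init mask for this type, fall back
 * to an init value of 0 with an invalid offset.
 */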
7791 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
7792                                       u8 init_val, u8 init_offset,
7793                                       bool init_mask_set)
7794 {
7795         ctxm->init_value = init_val;
7796         ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
7797         if (init_mask_set)
7798                 ctxm->init_offset = init_offset * 4;
7799         else
7800                 ctxm->init_value = 0;
7801 }
7802
7803 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
7804 {
7805         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7806         u16 type;
7807
7808         for (type = 0; type < ctx_max; type++) {
7809                 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
7810                 int n = 1;
7811
7812                 if (!ctxm->max_entries)
7813                         continue;
7814
7815                 if (ctxm->instance_bmap)
7816                         n = hweight32(ctxm->instance_bmap);
7817                 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
7818                 if (!ctxm->pg_info)
7819                         return -ENOMEM;
7820         }
7821         return 0;
7822 }
7823
7824 #define BNXT_CTX_INIT_VALID(flags)      \
7825         (!!((flags) &                   \
7826             FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
7827
7828 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
7829 {
7830         struct hwrm_func_backing_store_qcaps_v2_output *resp;
7831         struct hwrm_func_backing_store_qcaps_v2_input *req;
7832         struct bnxt_ctx_mem_info *ctx;
7833         u16 type;
7834         int rc;
7835
7836         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
7837         if (rc)
7838                 return rc;
7839
7840         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7841         if (!ctx)
7842                 return -ENOMEM;
7843         bp->ctx = ctx;
7844
7845         resp = hwrm_req_hold(bp, req);
7846
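        /* Walk the context types advertised by firmware.  Each
         * response carries the next valid type, so the loop advances
         * through resp->next_valid_type rather than a simple type++.
         */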
7847         for (type = 0; type < BNXT_CTX_V2_MAX; ) {
7848                 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
7849                 u8 init_val, init_off, i;
7850                 __le32 *p;
7851                 u32 flags;
7852
7853                 req->type = cpu_to_le16(type);
7854                 rc = hwrm_req_send(bp, req);
7855                 if (rc)
7856                         goto ctx_done;
7857                 flags = le32_to_cpu(resp->flags);
7858                 type = le16_to_cpu(resp->next_valid_type);
7859                 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
7860                         continue;
7861
7862                 ctxm->type = le16_to_cpu(resp->type);
7863                 ctxm->entry_size = le16_to_cpu(resp->entry_size);
7864                 ctxm->flags = flags;
7865                 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
7866                 ctxm->entry_multiple = resp->entry_multiple;
7867                 ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
7868                 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
7869                 init_val = resp->ctx_init_value;
7870                 init_off = resp->ctx_init_offset;
7871                 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
7872                                           BNXT_CTX_INIT_VALID(flags));
7873                 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
7874                                               BNXT_MAX_SPLIT_ENTRY);
7875                 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
7876                      i++, p++)
7877                         ctxm->split[i] = le32_to_cpu(*p);
7878         }
7879         rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
7880
7881 ctx_done:
7882         hwrm_req_drop(bp, req);
7883         return rc;
7884 }
7885
7886 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7887 {
7888         struct hwrm_func_backing_store_qcaps_output *resp;
7889         struct hwrm_func_backing_store_qcaps_input *req;
7890         int rc;
7891
7892         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7893                 return 0;
7894
7895         if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
7896                 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
7897
7898         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7899         if (rc)
7900                 return rc;
7901
7902         resp = hwrm_req_hold(bp, req);
7903         rc = hwrm_req_send_silent(bp, req);
7904         if (!rc) {
7905                 struct bnxt_ctx_mem_type *ctxm;
7906                 struct bnxt_ctx_mem_info *ctx;
7907                 u8 init_val, init_idx = 0;
7908                 u16 init_mask;
7909
7910                 ctx = bp->ctx;
7911                 if (!ctx) {
7912                         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7913                         if (!ctx) {
7914                                 rc = -ENOMEM;
7915                                 goto ctx_err;
7916                         }
7917                         bp->ctx = ctx;
7918                 }
7919                 init_val = resp->ctx_kind_initializer;
7920                 init_mask = le16_to_cpu(resp->ctx_init_mask);
7921
7922                 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
7923                 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
7924                 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7925                 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7926                 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
7927                 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
7928                 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
7929                                           (init_mask & (1 << init_idx++)) != 0);
7930
7931                 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
7932                 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7933                 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
7934                 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
7935                 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
7936                                           (init_mask & (1 << init_idx++)) != 0);
7937
7938                 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
7939                 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7940                 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
7941                 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
7942                 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
7943                                           (init_mask & (1 << init_idx++)) != 0);
7944
7945                 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
7946                 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
7947                 ctxm->max_entries = ctxm->vnic_entries +
7948                         le16_to_cpu(resp->vnic_max_ring_table_entries);
7949                 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
7950                 bnxt_init_ctx_initializer(ctxm, init_val,
7951                                           resp->vnic_init_offset,
7952                                           (init_mask & (1 << init_idx++)) != 0);
7953
7954                 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
7955                 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
7956                 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
7957                 bnxt_init_ctx_initializer(ctxm, init_val,
7958                                           resp->stat_init_offset,
7959                                           (init_mask & (1 << init_idx++)) != 0);
7960
7961                 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
7962                 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
7963                 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
7964                 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
7965                 ctxm->entry_multiple = resp->tqm_entries_multiple;
7966                 if (!ctxm->entry_multiple)
7967                         ctxm->entry_multiple = 1;
7968
7969                 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
7970
7971                 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
7972                 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
7973                 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
7974                 ctxm->mrav_num_entries_units =
7975                         le16_to_cpu(resp->mrav_num_entries_units);
7976                 bnxt_init_ctx_initializer(ctxm, init_val,
7977                                           resp->mrav_init_offset,
7978                                           (init_mask & (1 << init_idx++)) != 0);
7979
7980                 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
7981                 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
7982                 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
7983
7984                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7985                 if (!ctx->tqm_fp_rings_count)
7986                         ctx->tqm_fp_rings_count = bp->max_q;
7987                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7988                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7989
7990                 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
7991                 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
7992                 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
7993
7994                 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
7995         } else {
7996                 rc = 0;
7997         }
7998 ctx_err:
7999         hwrm_req_drop(bp, req);
8000         return rc;
8001 }
8002
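/* Encode the paging attributes of one backing store region.  The low
 * bits of *pg_attr select the indirection level: 0 means the single
 * data page is referenced directly, 1 means one level of page
 * directory, 2 means two levels.  *pg_dir points at the page
 * directory or the lone data page accordingly.
 */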
8003 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8004                                   __le64 *pg_dir)
8005 {
8006         if (!rmem->nr_pages)
8007                 return;
8008
8009         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8010         if (rmem->depth >= 1) {
8011                 if (rmem->depth == 2)
8012                         *pg_attr |= 2;
8013                 else
8014                         *pg_attr |= 1;
8015                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8016         } else {
8017                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8018         }
8019 }
8020
8021 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
8022         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
8023          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
8024          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
8025          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
8026          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8027
8028 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8029 {
8030         struct hwrm_func_backing_store_cfg_input *req;
8031         struct bnxt_ctx_mem_info *ctx = bp->ctx;
8032         struct bnxt_ctx_pg_info *ctx_pg;
8033         struct bnxt_ctx_mem_type *ctxm;
8034         void **__req = (void **)&req;
8035         u32 req_len = sizeof(*req);
8036         __le32 *num_entries;
8037         __le64 *pg_dir;
8038         u32 flags = 0;
8039         u8 *pg_attr;
8040         u32 ena;
8041         int rc;
8042         int i;
8043
8044         if (!ctx)
8045                 return 0;
8046
8047         if (req_len > bp->hwrm_max_ext_req_len)
8048                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8049         rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8050         if (rc)
8051                 return rc;
8052
8053         req->enables = cpu_to_le32(enables);
8054         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8055                 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8056                 ctx_pg = ctxm->pg_info;
8057                 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8058                 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8059                 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8060                 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8061                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8062                                       &req->qpc_pg_size_qpc_lvl,
8063                                       &req->qpc_page_dir);
8064
8065                 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8066                         req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8067         }
8068         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8069                 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8070                 ctx_pg = ctxm->pg_info;
8071                 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8072                 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8073                 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8074                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8075                                       &req->srq_pg_size_srq_lvl,
8076                                       &req->srq_page_dir);
8077         }
8078         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8079                 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8080                 ctx_pg = ctxm->pg_info;
8081                 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8082                 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8083                 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8084                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8085                                       &req->cq_pg_size_cq_lvl,
8086                                       &req->cq_page_dir);
8087         }
8088         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8089                 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8090                 ctx_pg = ctxm->pg_info;
8091                 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8092                 req->vnic_num_ring_table_entries =
8093                         cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8094                 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8095                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8096                                       &req->vnic_pg_size_vnic_lvl,
8097                                       &req->vnic_page_dir);
8098         }
8099         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8100                 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8101                 ctx_pg = ctxm->pg_info;
8102                 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8103                 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8104                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8105                                       &req->stat_pg_size_stat_lvl,
8106                                       &req->stat_page_dir);
8107         }
8108         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8109                 u32 units;
8110
8111                 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8112                 ctx_pg = ctxm->pg_info;
8113                 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8114                 units = ctxm->mrav_num_entries_units;
8115                 if (units) {
8116                         u32 num_mr, num_ah = ctxm->mrav_av_entries;
8117                         u32 entries;
8118
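                        /* With an entry size unit reported by the firmware,
                         * mrav_num_entries holds a split count: the high 16
                         * bits carry the MR count and the low 16 bits the AH
                         * count, both scaled down by the unit size.
                         */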
8119                         num_mr = ctx_pg->entries - num_ah;
8120                         entries = ((num_mr / units) << 16) | (num_ah / units);
8121                         req->mrav_num_entries = cpu_to_le32(entries);
8122                         flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8123                 }
8124                 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8125                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8126                                       &req->mrav_pg_size_mrav_lvl,
8127                                       &req->mrav_page_dir);
8128         }
8129         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8130                 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8131                 ctx_pg = ctxm->pg_info;
8132                 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8133                 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8134                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8135                                       &req->tim_pg_size_tim_lvl,
8136                                       &req->tim_page_dir);
8137         }
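        /* The slow-path TQM ring is configured first, then each fast-path
         * TQM ring.  The num_entries, pg_size/lvl and page_dir request
         * fields are laid out as parallel arrays, so all three cursors
         * advance together while the enable bit shifts left once per ring.
         */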
8138         ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8139         for (i = 0, num_entries = &req->tqm_sp_num_entries,
8140              pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8141              pg_dir = &req->tqm_sp_page_dir,
8142              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8143              ctx_pg = ctxm->pg_info;
8144              i < BNXT_MAX_TQM_RINGS;
8145              ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8146              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8147                 if (!(enables & ena))
8148                         continue;
8149
8150                 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8151                 *num_entries = cpu_to_le32(ctx_pg->entries);
8152                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8153         }
8154         req->flags = cpu_to_le32(flags);
8155         return hwrm_req_send(bp, req);
8156 }
8157
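/* Point the ring_mem descriptor at this region's page and DMA address
 * arrays, then allocate the pages through the common ring allocator.
 */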
8158 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8159                                   struct bnxt_ctx_pg_info *ctx_pg)
8160 {
8161         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8162
8163         rmem->page_size = BNXT_PAGE_SIZE;
8164         rmem->pg_arr = ctx_pg->ctx_pg_arr;
8165         rmem->dma_arr = ctx_pg->ctx_dma_arr;
8166         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8167         if (rmem->depth >= 1)
8168                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8169         return bnxt_alloc_ring(bp, rmem);
8170 }
8171
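/* Allocate the pages backing one context memory region.  Small regions
 * use a single-level page table; anything larger than MAX_CTX_PAGES
 * pages (or a requested depth above 1) is built as a two-level table
 * with one second-level block per MAX_CTX_PAGES chunk.
 */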
8172 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8173                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8174                                   u8 depth, struct bnxt_ctx_mem_type *ctxm)
8175 {
8176         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8177         int rc;
8178
8179         if (!mem_size)
8180                 return -EINVAL;
8181
8182         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8183         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8184                 ctx_pg->nr_pages = 0;
8185                 return -EINVAL;
8186         }
8187         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8188                 int nr_tbls, i;
8189
8190                 rmem->depth = 2;
8191                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8192                                              GFP_KERNEL);
8193                 if (!ctx_pg->ctx_pg_tbl)
8194                         return -ENOMEM;
8195                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8196                 rmem->nr_pages = nr_tbls;
8197                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8198                 if (rc)
8199                         return rc;
8200                 for (i = 0; i < nr_tbls; i++) {
8201                         struct bnxt_ctx_pg_info *pg_tbl;
8202
8203                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8204                         if (!pg_tbl)
8205                                 return -ENOMEM;
8206                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8207                         rmem = &pg_tbl->ring_mem;
8208                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8209                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8210                         rmem->depth = 1;
8211                         rmem->nr_pages = MAX_CTX_PAGES;
8212                         rmem->ctx_mem = ctxm;
8213                         if (i == (nr_tbls - 1)) {
8214                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8215
8216                                 if (rem)
8217                                         rmem->nr_pages = rem;
8218                         }
8219                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8220                         if (rc)
8221                                 break;
8222                 }
8223         } else {
8224                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8225                 if (rmem->nr_pages > 1 || depth)
8226                         rmem->depth = 1;
8227                 rmem->ctx_mem = ctxm;
8228                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8229         }
8230         return rc;
8231 }
8232
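/* Free one context memory region, tearing down the second-level page
 * table blocks first when the region was built as a two-level table.
 */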
8233 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8234                                   struct bnxt_ctx_pg_info *ctx_pg)
8235 {
8236         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8237
8238         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8239             ctx_pg->ctx_pg_tbl) {
8240                 int i, nr_tbls = rmem->nr_pages;
8241
8242                 for (i = 0; i < nr_tbls; i++) {
8243                         struct bnxt_ctx_pg_info *pg_tbl;
8244                         struct bnxt_ring_mem_info *rmem2;
8245
8246                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
8247                         if (!pg_tbl)
8248                                 continue;
8249                         rmem2 = &pg_tbl->ring_mem;
8250                         bnxt_free_ring(bp, rmem2);
8251                         ctx_pg->ctx_pg_arr[i] = NULL;
8252                         kfree(pg_tbl);
8253                         ctx_pg->ctx_pg_tbl[i] = NULL;
8254                 }
8255                 kfree(ctx_pg->ctx_pg_tbl);
8256                 ctx_pg->ctx_pg_tbl = NULL;
8257         }
8258         bnxt_free_ring(bp, rmem);
8259         ctx_pg->nr_pages = 0;
8260 }
8261
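/* Size and allocate the page tables for every instance of a context
 * type: round the entry count up to the firmware's required multiple,
 * clamp it to the advertised min/max, then allocate entries *
 * entry_size bytes per instance.
 */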
8262 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8263                                    struct bnxt_ctx_mem_type *ctxm, u32 entries,
8264                                    u8 pg_lvl)
8265 {
8266         struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8267         int i, rc = 0, n = 1;
8268         u32 mem_size;
8269
8270         if (!ctxm->entry_size || !ctx_pg)
8271                 return -EINVAL;
8272         if (ctxm->instance_bmap)
8273                 n = hweight32(ctxm->instance_bmap);
8274         if (ctxm->entry_multiple)
8275                 entries = roundup(entries, ctxm->entry_multiple);
8276         entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8277         mem_size = entries * ctxm->entry_size;
8278         for (i = 0; i < n && !rc; i++) {
8279                 ctx_pg[i].entries = entries;
8280                 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8281                                             ctxm->init_value ? ctxm : NULL);
8282         }
8283         return rc;
8284 }
8285
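/* Configure one context type with HWRM_FUNC_BACKING_STORE_CFG_V2,
 * issuing a separate request for each instance in the instance bitmap.
 * @last marks the final type so that BS_CFG_ALL_DONE can be signaled
 * on the closing request.
 */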
8286 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8287                                                struct bnxt_ctx_mem_type *ctxm,
8288                                                bool last)
8289 {
8290         struct hwrm_func_backing_store_cfg_v2_input *req;
8291         u32 instance_bmap = ctxm->instance_bmap;
8292         int i, j, rc = 0, n = 1;
8293         __le32 *p;
8294
8295         if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8296                 return 0;
8297
8298         if (instance_bmap)
8299                 n = hweight32(ctxm->instance_bmap);
8300         else
8301                 instance_bmap = 1;
8302
8303         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8304         if (rc)
8305                 return rc;
8306         hwrm_req_hold(bp, req);
8307         req->type = cpu_to_le16(ctxm->type);
8308         req->entry_size = cpu_to_le16(ctxm->entry_size);
8309         req->subtype_valid_cnt = ctxm->split_entry_cnt;
8310         for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
8311                 p[i] = cpu_to_le32(ctxm->split[i]);
8312         for (i = 0, j = 0; j < n && !rc; i++) {
8313                 struct bnxt_ctx_pg_info *ctx_pg;
8314
8315                 if (!(instance_bmap & (1 << i)))
8316                         continue;
8317                 req->instance = cpu_to_le16(i);
8318                 ctx_pg = &ctxm->pg_info[j++];
8319                 if (!ctx_pg->entries)
8320                         continue;
8321                 req->num_entries = cpu_to_le32(ctx_pg->entries);
8322                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8323                                       &req->page_size_pbl_level,
8324                                       &req->page_dir);
8325                 if (last && j == n)
8326                         req->flags =
8327                                 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
8328                 rc = hwrm_req_send(bp, req);
8329         }
8330         hwrm_req_drop(bp, req);
8331         return rc;
8332 }
8333
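/* Push every valid V2 context type to the firmware in order.  The last
 * type (TIM when RoCE backing store is enabled, otherwise the last L2
 * type) is flagged so the firmware knows the sequence is complete.
 */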
8334 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
8335 {
8336         struct bnxt_ctx_mem_info *ctx = bp->ctx;
8337         struct bnxt_ctx_mem_type *ctxm;
8338         u16 last_type;
8339         int rc = 0;
8340         u16 type;
8341
8342         if (!ena)
8343                 return 0;
8344         else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
8345                 last_type = BNXT_CTX_MAX - 1;
8346         else
8347                 last_type = BNXT_CTX_L2_MAX - 1;
8348         ctx->ctx_arr[last_type].last = 1;
8349
8350         for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
8351                 ctxm = &ctx->ctx_arr[type];
8352
8353                 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8354                 if (rc)
8355                         return rc;
8356         }
8357         return 0;
8358 }
8359
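/* Release all firmware context memory: every instance of every context
 * type, the per-type page info, and finally the bnxt_ctx_mem_info
 * itself.
 */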
8360 void bnxt_free_ctx_mem(struct bnxt *bp)
8361 {
8362         struct bnxt_ctx_mem_info *ctx = bp->ctx;
8363         u16 type;
8364
8365         if (!ctx)
8366                 return;
8367
8368         for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
8369                 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8370                 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8371                 int i, n = 1;
8372
8373                 if (!ctx_pg)
8374                         continue;
8375                 if (ctxm->instance_bmap)
8376                         n = hweight32(ctxm->instance_bmap);
8377                 for (i = 0; i < n; i++)
8378                         bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8379
8380                 kfree(ctx_pg);
8381                 ctxm->pg_info = NULL;
8382         }
8383
8384         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
8385         kfree(ctx);
8386         bp->ctx = NULL;
8387 }
8388
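/* Query the firmware's backing store requirements and allocate context
 * memory for it.  L2 operation needs QP, SRQ, CQ, VNIC and STAT
 * contexts; when RoCE is supported (and this is not a kdump kernel),
 * extra QP and SRQ entries plus MRAV and TIM contexts are added and
 * deeper page tables are used.
 */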
8389 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
8390 {
8391         struct bnxt_ctx_mem_type *ctxm;
8392         struct bnxt_ctx_mem_info *ctx;
8393         u32 l2_qps, qp1_qps, max_qps;
8394         u32 ena, entries_sp, entries;
8395         u32 srqs, max_srqs, min;
8396         u32 num_mr, num_ah;
8397         u32 extra_srqs = 0;
8398         u32 extra_qps = 0;
8399         u32 fast_qpmd_qps;
8400         u8 pg_lvl = 1;
8401         int i, rc;
8402
8403         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8404         if (rc) {
8405                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8406                            rc);
8407                 return rc;
8408         }
8409         ctx = bp->ctx;
8410         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
8411                 return 0;
8412
8413         ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8414         l2_qps = ctxm->qp_l2_entries;
8415         qp1_qps = ctxm->qp_qp1_entries;
8416         fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
8417         max_qps = ctxm->max_entries;
8418         ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8419         srqs = ctxm->srq_l2_entries;
8420         max_srqs = ctxm->max_entries;
8421         ena = 0;
8422         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
8423                 pg_lvl = 2;
8424                 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
8425                 /* allocate extra QPs if the fw supports the RoCE fast QP destroy feature */
8426                 extra_qps += fast_qpmd_qps;
8427                 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
8428                 if (fast_qpmd_qps)
8429                         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
8430         }
8431
8432         ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8433         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8434                                      pg_lvl);
8435         if (rc)
8436                 return rc;
8437
8438         ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8439         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8440         if (rc)
8441                 return rc;
8442
8443         ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8444         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8445                                      extra_qps * 2, pg_lvl);
8446         if (rc)
8447                 return rc;
8448
8449         ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8450         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8451         if (rc)
8452                 return rc;
8453
8454         ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8455         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8456         if (rc)
8457                 return rc;
8458
8459         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
8460                 goto skip_rdma;
8461
8462         ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8463         /* 128K extra is needed to accommodate static AH context
8464          * allocation by f/w.
8465          */
8466         num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
8467         num_ah = min_t(u32, num_mr, 1024 * 128);
8468         ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
8469         if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
8470                 ctxm->mrav_av_entries = num_ah;
8471
8472         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8473         if (rc)
8474                 return rc;
8475         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
8476
8477         ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8478         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8479         if (rc)
8480                 return rc;
8481         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
8482
8483 skip_rdma:
8484         ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8485         min = ctxm->min_entries;
8486         entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
8487                      2 * (extra_qps + qp1_qps) + min;
8488         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8489         if (rc)
8490                 return rc;
8491
8492         ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8493         entries = l2_qps + 2 * (extra_qps + qp1_qps);
8494         rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8495         if (rc)
8496                 return rc;
8497         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
8498                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
8499         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
8500
8501         if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8502                 rc = bnxt_backing_store_cfg_v2(bp, ena);
8503         else
8504                 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8505         if (rc) {
8506                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8507                            rc);
8508                 return rc;
8509         }
8510         ctx->flags |= BNXT_CTX_FLAG_INITED;
8511         return 0;
8512 }
8513
8514 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
8515 {
8516         struct hwrm_func_resource_qcaps_output *resp;
8517         struct hwrm_func_resource_qcaps_input *req;
8518         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8519         int rc;
8520
8521         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8522         if (rc)
8523                 return rc;
8524
8525         req->fid = cpu_to_le16(0xffff);
8526         resp = hwrm_req_hold(bp, req);
8527         rc = hwrm_req_send_silent(bp, req);
8528         if (rc)
8529                 goto hwrm_func_resc_qcaps_exit;
8530
8531         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
8532         if (!all)
8533                 goto hwrm_func_resc_qcaps_exit;
8534
8535         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
8536         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8537         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
8538         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8539         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
8540         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8541         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
8542         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8543         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
8544         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
8545         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
8546         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8547         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
8548         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8549         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
8550         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8551
8552         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8553                 u16 max_msix = le16_to_cpu(resp->max_msix);
8554
8555                 hw_resc->max_nqs = max_msix;
8556                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
8557         }
8558
8559         if (BNXT_PF(bp)) {
8560                 struct bnxt_pf_info *pf = &bp->pf;
8561
8562                 pf->vf_resv_strategy =
8563                         le16_to_cpu(resp->vf_reservation_strategy);
8564                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
8565                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
8566         }
8567 hwrm_func_resc_qcaps_exit:
8568         hwrm_req_drop(bp, req);
8569         return rc;
8570 }
8571
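/* Query PTP support and the PHC register layout.  On success this
 * allocates bp->ptp_cfg (if not already present) and initializes PTP;
 * on any failure the PTP state is torn back down.
 */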
8572 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
8573 {
8574         struct hwrm_port_mac_ptp_qcfg_output *resp;
8575         struct hwrm_port_mac_ptp_qcfg_input *req;
8576         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
8577         bool phc_cfg;
8578         u8 flags;
8579         int rc;
8580
8581         if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
8582                 rc = -ENODEV;
8583                 goto no_ptp;
8584         }
8585
8586         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
8587         if (rc)
8588                 goto no_ptp;
8589
8590         req->port_id = cpu_to_le16(bp->pf.port_id);
8591         resp = hwrm_req_hold(bp, req);
8592         rc = hwrm_req_send(bp, req);
8593         if (rc)
8594                 goto exit;
8595
8596         flags = resp->flags;
8597         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
8598                 rc = -ENODEV;
8599                 goto exit;
8600         }
8601         if (!ptp) {
8602                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
8603                 if (!ptp) {
8604                         rc = -ENOMEM;
8605                         goto exit;
8606                 }
8607                 ptp->bp = bp;
8608                 bp->ptp_cfg = ptp;
8609         }
8610         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
8611                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
8612                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
8613         } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8614                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
8615                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
8616         } else {
8617                 rc = -ENODEV;
8618                 goto exit;
8619         }
8620         phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
8621         rc = bnxt_ptp_init(bp, phc_cfg);
8622         if (rc)
8623                 netdev_warn(bp->dev, "PTP initialization failed.\n");
8624 exit:
8625         hwrm_req_drop(bp, req);
8626         if (!rc)
8627                 return 0;
8628
8629 no_ptp:
8630         bnxt_ptp_clear(bp);
8631         kfree(ptp);
8632         bp->ptp_cfg = NULL;
8633         return rc;
8634 }
8635
8636 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
8637 {
8638         struct hwrm_func_qcaps_output *resp;
8639         struct hwrm_func_qcaps_input *req;
8640         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8641         u32 flags, flags_ext, flags_ext2;
8642         int rc;
8643
8644         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
8645         if (rc)
8646                 return rc;
8647
8648         req->fid = cpu_to_le16(0xffff);
8649         resp = hwrm_req_hold(bp, req);
8650         rc = hwrm_req_send(bp, req);
8651         if (rc)
8652                 goto hwrm_func_qcaps_exit;
8653
8654         flags = le32_to_cpu(resp->flags);
8655         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
8656                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
8657         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
8658                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
8659         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
8660                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
8661         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
8662                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
8663         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
8664                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
8665         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
8666                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
8667         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
8668                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
8669         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
8670                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
8671         if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
8672                 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
8673
8674         flags_ext = le32_to_cpu(resp->flags_ext);
8675         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
8676                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
8677         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
8678                 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
8679         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
8680                 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
8681         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
8682                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
8683         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
8684                 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
8685         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
8686                 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
8687         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
8688                 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
8689
8690         flags_ext2 = le32_to_cpu(resp->flags_ext2);
8691         if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
8692                 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
8693         if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
8694                 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
8695
8696         bp->tx_push_thresh = 0;
8697         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
8698             BNXT_FW_MAJ(bp) > 217)
8699                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
8700
8701         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8702         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8703         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8704         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8705         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
8706         if (!hw_resc->max_hw_ring_grps)
8707                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
8708         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8709         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8710         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8711
8712         if (BNXT_PF(bp)) {
8713                 struct bnxt_pf_info *pf = &bp->pf;
8714
8715                 pf->fw_fid = le16_to_cpu(resp->fid);
8716                 pf->port_id = le16_to_cpu(resp->port_id);
8717                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
8718                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
8719                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
8720                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
8721                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
8722                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
8723                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
8724                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
8725                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
8726                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
8727                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
8728                         bp->flags |= BNXT_FLAG_WOL_CAP;
8729                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
8730                         bp->fw_cap |= BNXT_FW_CAP_PTP;
8731                 } else {
8732                         bnxt_ptp_clear(bp);
8733                         kfree(bp->ptp_cfg);
8734                         bp->ptp_cfg = NULL;
8735                 }
8736         } else {
8737 #ifdef CONFIG_BNXT_SRIOV
8738                 struct bnxt_vf_info *vf = &bp->vf;
8739
8740                 vf->fw_fid = le16_to_cpu(resp->fid);
8741                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
8742 #endif
8743         }
8744
8745 hwrm_func_qcaps_exit:
8746         hwrm_req_drop(bp, req);
8747         return rc;
8748 }
8749
8750 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
8751 {
8752         struct hwrm_dbg_qcaps_output *resp;
8753         struct hwrm_dbg_qcaps_input *req;
8754         int rc;
8755
8756         bp->fw_dbg_cap = 0;
8757         if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
8758                 return;
8759
8760         rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
8761         if (rc)
8762                 return;
8763
8764         req->fid = cpu_to_le16(0xffff);
8765         resp = hwrm_req_hold(bp, req);
8766         rc = hwrm_req_send(bp, req);
8767         if (rc)
8768                 goto hwrm_dbg_qcaps_exit;
8769
8770         bp->fw_dbg_cap = le32_to_cpu(resp->flags);
8771
8772 hwrm_dbg_qcaps_exit:
8773         hwrm_req_drop(bp, req);
8774 }
8775
8776 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
8777
8778 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
8779 {
8780         int rc;
8781
8782         rc = __bnxt_hwrm_func_qcaps(bp);
8783         if (rc)
8784                 return rc;
8785
8786         bnxt_hwrm_dbg_qcaps(bp);
8787
8788         rc = bnxt_hwrm_queue_qportcfg(bp);
8789         if (rc) {
8790                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
8791                 return rc;
8792         }
8793         if (bp->hwrm_spec_code >= 0x10803) {
8794                 rc = bnxt_alloc_ctx_mem(bp);
8795                 if (rc)
8796                         return rc;
8797                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8798                 if (!rc)
8799                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
8800         }
8801         return 0;
8802 }
8803
8804 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
8805 {
8806         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
8807         struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
8808         u32 flags;
8809         int rc;
8810
8811         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
8812                 return 0;
8813
8814         rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
8815         if (rc)
8816                 return rc;
8817
8818         resp = hwrm_req_hold(bp, req);
8819         rc = hwrm_req_send(bp, req);
8820         if (rc)
8821                 goto hwrm_cfa_adv_qcaps_exit;
8822
8823         flags = le32_to_cpu(resp->flags);
8824         if (flags &
8825             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
8826                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
8827
8828 hwrm_cfa_adv_qcaps_exit:
8829         hwrm_req_drop(bp, req);
8830         return rc;
8831 }
8832
8833 static int __bnxt_alloc_fw_health(struct bnxt *bp)
8834 {
8835         if (bp->fw_health)
8836                 return 0;
8837
8838         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
8839         if (!bp->fw_health)
8840                 return -ENOMEM;
8841
8842         mutex_init(&bp->fw_health->lock);
8843         return 0;
8844 }
8845
8846 static int bnxt_alloc_fw_health(struct bnxt *bp)
8847 {
8848         int rc;
8849
8850         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
8851             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
8852                 return 0;
8853
8854         rc = __bnxt_alloc_fw_health(bp);
8855         if (rc) {
8856                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
8857                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8858                 return rc;
8859         }
8860
8861         return 0;
8862 }
8863
8864 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
8865 {
8866         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
8867                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
8868                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
8869 }
8870
8871 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
8872 {
8873         struct bnxt_fw_health *fw_health = bp->fw_health;
8874         u32 reg_type;
8875
8876         if (!fw_health)
8877                 return;
8878
8879         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
8880         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
8881                 fw_health->status_reliable = false;
8882
8883         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
8884         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
8885                 fw_health->resets_reliable = false;
8886 }
8887
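/* Locate the firmware health status register.  The shared hcomm_status
 * structure is probed first; if its signature is missing, fall back to
 * the fixed GRC status location on P5 chips.
 */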
8888 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
8889 {
8890         void __iomem *hs;
8891         u32 status_loc;
8892         u32 reg_type;
8893         u32 sig;
8894
8895         if (bp->fw_health)
8896                 bp->fw_health->status_reliable = false;
8897
8898         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
8899         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
8900
8901         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
8902         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
8903                 if (!bp->chip_num) {
8904                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
8905                         bp->chip_num = readl(bp->bar0 +
8906                                              BNXT_FW_HEALTH_WIN_BASE +
8907                                              BNXT_GRC_REG_CHIP_NUM);
8908                 }
8909                 if (!BNXT_CHIP_P5(bp))
8910                         return;
8911
8912                 status_loc = BNXT_GRC_REG_STATUS_P5 |
8913                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
8914         } else {
8915                 status_loc = readl(hs + offsetof(struct hcomm_status,
8916                                                  fw_status_loc));
8917         }
8918
8919         if (__bnxt_alloc_fw_health(bp)) {
8920                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
8921                 return;
8922         }
8923
8924         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
8925         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
8926         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
8927                 __bnxt_map_fw_health_reg(bp, status_loc);
8928                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
8929                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
8930         }
8931
8932         bp->fw_health->status_reliable = true;
8933 }
8934
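/* Pre-map the GRC-based health monitoring registers.  All of them must
 * share one window-aligned GRC base; otherwise -ERANGE is returned and
 * the registers are not marked reliable.
 */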
8935 static int bnxt_map_fw_health_regs(struct bnxt *bp)
8936 {
8937         struct bnxt_fw_health *fw_health = bp->fw_health;
8938         u32 reg_base = 0xffffffff;
8939         int i;
8940
8941         bp->fw_health->status_reliable = false;
8942         bp->fw_health->resets_reliable = false;
8943         /* Only pre-map the monitoring GRC registers using window 3 */
8944         for (i = 0; i < 4; i++) {
8945                 u32 reg = fw_health->regs[i];
8946
8947                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
8948                         continue;
8949                 if (reg_base == 0xffffffff)
8950                         reg_base = reg & BNXT_GRC_BASE_MASK;
8951                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
8952                         return -ERANGE;
8953                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
8954         }
8955         bp->fw_health->status_reliable = true;
8956         bp->fw_health->resets_reliable = true;
8957         if (reg_base == 0xffffffff)
8958                 return 0;
8959
8960         __bnxt_map_fw_health_reg(bp, reg_base);
8961         return 0;
8962 }
8963
8964 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
8965 {
8966         if (!bp->fw_health)
8967                 return;
8968
8969         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
8970                 bp->fw_health->status_reliable = true;
8971                 bp->fw_health->resets_reliable = true;
8972         } else {
8973                 bnxt_try_map_fw_health_reg(bp);
8974         }
8975 }
8976
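/* Fetch the firmware error recovery policy: polling intervals, wait
 * times, the health/heartbeat/reset register locations, and the
 * register write sequence used to reset the chip.  The capability is
 * dropped if the response is inconsistent or the registers cannot be
 * mapped.
 */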
8977 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
8978 {
8979         struct bnxt_fw_health *fw_health = bp->fw_health;
8980         struct hwrm_error_recovery_qcfg_output *resp;
8981         struct hwrm_error_recovery_qcfg_input *req;
8982         int rc, i;
8983
8984         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
8985                 return 0;
8986
8987         rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
8988         if (rc)
8989                 return rc;
8990
8991         resp = hwrm_req_hold(bp, req);
8992         rc = hwrm_req_send(bp, req);
8993         if (rc)
8994                 goto err_recovery_out;
8995         fw_health->flags = le32_to_cpu(resp->flags);
8996         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8997             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8998                 rc = -EINVAL;
8999                 goto err_recovery_out;
9000         }
9001         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9002         fw_health->master_func_wait_dsecs =
9003                 le32_to_cpu(resp->master_func_wait_period);
9004         fw_health->normal_func_wait_dsecs =
9005                 le32_to_cpu(resp->normal_func_wait_period);
9006         fw_health->post_reset_wait_dsecs =
9007                 le32_to_cpu(resp->master_func_wait_period_after_reset);
9008         fw_health->post_reset_max_wait_dsecs =
9009                 le32_to_cpu(resp->max_bailout_time_after_reset);
9010         fw_health->regs[BNXT_FW_HEALTH_REG] =
9011                 le32_to_cpu(resp->fw_health_status_reg);
9012         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9013                 le32_to_cpu(resp->fw_heartbeat_reg);
9014         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9015                 le32_to_cpu(resp->fw_reset_cnt_reg);
9016         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9017                 le32_to_cpu(resp->reset_inprogress_reg);
9018         fw_health->fw_reset_inprog_reg_mask =
9019                 le32_to_cpu(resp->reset_inprogress_reg_mask);
9020         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9021         if (fw_health->fw_reset_seq_cnt >= 16) {
9022                 rc = -EINVAL;
9023                 goto err_recovery_out;
9024         }
9025         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9026                 fw_health->fw_reset_seq_regs[i] =
9027                         le32_to_cpu(resp->reset_reg[i]);
9028                 fw_health->fw_reset_seq_vals[i] =
9029                         le32_to_cpu(resp->reset_reg_val[i]);
9030                 fw_health->fw_reset_seq_delay_msec[i] =
9031                         resp->delay_after_reset[i];
9032         }
9033 err_recovery_out:
9034         hwrm_req_drop(bp, req);
9035         if (!rc)
9036                 rc = bnxt_map_fw_health_regs(bp);
9037         if (rc)
9038                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9039         return rc;
9040 }
9041
9042 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9043 {
9044         struct hwrm_func_reset_input *req;
9045         int rc;
9046
9047         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9048         if (rc)
9049                 return rc;
9050
9051         req->enables = 0;
9052         hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9053         return hwrm_req_send(bp, req);
9054 }
9055
9056 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9057 {
9058         struct hwrm_nvm_get_dev_info_output nvm_info;
9059
9060         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9061                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9062                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9063                          nvm_info.nvm_cfg_ver_upd);
9064 }
9065
9066 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9067 {
9068         struct hwrm_queue_qportcfg_output *resp;
9069         struct hwrm_queue_qportcfg_input *req;
9070         u8 i, j, *qptr;
9071         bool no_rdma;
9072         int rc;
9073
9074         rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9075         if (rc)
9076                 return rc;
9077
9078         resp = hwrm_req_hold(bp, req);
9079         rc = hwrm_req_send(bp, req);
9080         if (rc)
9081                 goto qportcfg_exit;
9082
9083         if (!resp->max_configurable_queues) {
9084                 rc = -EINVAL;
9085                 goto qportcfg_exit;
9086         }
9087         bp->max_tc = resp->max_configurable_queues;
9088         bp->max_lltc = resp->max_configurable_lossless_queues;
9089         if (bp->max_tc > BNXT_MAX_QUEUE)
9090                 bp->max_tc = BNXT_MAX_QUEUE;
9091
9092         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9093         qptr = &resp->queue_id0;
9094         for (i = 0, j = 0; i < bp->max_tc; i++) {
9095                 bp->q_info[j].queue_id = *qptr;
9096                 bp->q_ids[i] = *qptr++;
9097                 bp->q_info[j].queue_profile = *qptr++;
9098                 bp->tc_to_qidx[j] = j;
9099                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9100                     (no_rdma && BNXT_PF(bp)))
9101                         j++;
9102         }
9103         bp->max_q = bp->max_tc;
9104         bp->max_tc = max_t(u8, j, 1);
9105
9106         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9107                 bp->max_tc = 1;
9108
9109         if (bp->max_lltc > bp->max_tc)
9110                 bp->max_lltc = bp->max_tc;
9111
9112 qportcfg_exit:
9113         hwrm_req_drop(bp, req);
9114         return rc;
9115 }
9116
9117 static int bnxt_hwrm_poll(struct bnxt *bp)
9118 {
9119         struct hwrm_ver_get_input *req;
9120         int rc;
9121
9122         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9123         if (rc)
9124                 return rc;
9125
9126         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9127         req->hwrm_intf_min = HWRM_VERSION_MINOR;
9128         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9129
9130         hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9131         rc = hwrm_req_send(bp, req);
9132         return rc;
9133 }
9134
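/* Negotiate the HWRM interface with the firmware: record the version
 * strings, spec code, request length limits and command timeouts, and
 * translate the advertised device capabilities into fw_cap flags.
 */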
9135 static int bnxt_hwrm_ver_get(struct bnxt *bp)
9136 {
9137         struct hwrm_ver_get_output *resp;
9138         struct hwrm_ver_get_input *req;
9139         u16 fw_maj, fw_min, fw_bld, fw_rsv;
9140         u32 dev_caps_cfg, hwrm_ver;
9141         int rc, len;
9142
9143         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9144         if (rc)
9145                 return rc;
9146
9147         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9148         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
9149         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9150         req->hwrm_intf_min = HWRM_VERSION_MINOR;
9151         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9152
9153         resp = hwrm_req_hold(bp, req);
9154         rc = hwrm_req_send(bp, req);
9155         if (rc)
9156                 goto hwrm_ver_get_exit;
9157
9158         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
9159
9160         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
9161                              resp->hwrm_intf_min_8b << 8 |
9162                              resp->hwrm_intf_upd_8b;
9163         if (resp->hwrm_intf_maj_8b < 1) {
9164                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
9165                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9166                             resp->hwrm_intf_upd_8b);
9167                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
9168         }
9169
9170         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
9171                         HWRM_VERSION_UPDATE;
9172
9173         if (bp->hwrm_spec_code > hwrm_ver)
9174                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9175                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
9176                          HWRM_VERSION_UPDATE);
9177         else
9178                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9179                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9180                          resp->hwrm_intf_upd_8b);
9181
9182         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
9183         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
9184                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
9185                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
9186                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
9187                 len = FW_VER_STR_LEN;
9188         } else {
9189                 fw_maj = resp->hwrm_fw_maj_8b;
9190                 fw_min = resp->hwrm_fw_min_8b;
9191                 fw_bld = resp->hwrm_fw_bld_8b;
9192                 fw_rsv = resp->hwrm_fw_rsvd_8b;
9193                 len = BC_HWRM_STR_LEN;
9194         }
9195         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
9196         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9197                  fw_rsv);
9198
9199         if (strlen(resp->active_pkg_name)) {
9200                 int fw_ver_len = strlen(bp->fw_ver_str);
9201
9202                 snprintf(bp->fw_ver_str + fw_ver_len,
9203                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
9204                          resp->active_pkg_name);
9205                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
9206         }
9207
9208         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
9209         if (!bp->hwrm_cmd_timeout)
9210                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
9211         bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
9212         if (!bp->hwrm_cmd_max_timeout)
9213                 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
9214         else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
9215                 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
9216                             bp->hwrm_cmd_max_timeout / 1000);
9217
9218         if (resp->hwrm_intf_maj_8b >= 1) {
9219                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
9220                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
9221         }
9222         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
9223                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
9224
9225         bp->chip_num = le16_to_cpu(resp->chip_num);
9226         bp->chip_rev = resp->chip_rev;
9227         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
9228             !resp->chip_metal)
9229                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
9230
9231         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
9232         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
9233             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
9234                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
9235
9236         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
9237                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
9238
9239         if (dev_caps_cfg &
9240             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
9241                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
9242
9243         if (dev_caps_cfg &
9244             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
9245                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
9246
9247         if (dev_caps_cfg &
9248             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
9249                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
9250
9251 hwrm_ver_get_exit:
9252         hwrm_req_drop(bp, req);
9253         return rc;
9254 }
9255
9256 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
9257 {
9258         struct hwrm_fw_set_time_input *req;
9259         struct tm tm;
9260         time64_t now = ktime_get_real_seconds();
9261         int rc;
9262
9263         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
9264             bp->hwrm_spec_code < 0x10400)
9265                 return -EOPNOTSUPP;
9266
9267         time64_to_tm(now, 0, &tm);
9268         rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9269         if (rc)
9270                 return rc;
9271
9272         req->year = cpu_to_le16(1900 + tm.tm_year);
9273         req->month = 1 + tm.tm_mon;
9274         req->day = tm.tm_mday;
9275         req->hour = tm.tm_hour;
9276         req->minute = tm.tm_min;
9277         req->second = tm.tm_sec;
9278         return hwrm_req_send(bp, req);
9279 }
9280
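/* Fold a masked hardware counter into its 64-bit software shadow.  A
 * new hardware value below the previous masked value means the counter
 * wrapped, so one full period (mask + 1) is added back in.
 */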
9281 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
9282 {
9283         u64 sw_tmp;
9284
9285         hw &= mask;
9286         sw_tmp = (*sw & ~mask) | hw;
9287         if (hw < (*sw & mask))
9288                 sw_tmp += mask + 1;
9289         WRITE_ONCE(*sw, sw_tmp);
9290 }
9291
9292 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
9293                                     int count, bool ignore_zero)
9294 {
9295         int i;
9296
9297         for (i = 0; i < count; i++) {
9298                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
9299
9300                 if (ignore_zero && !hw)
9301                         continue;
9302
9303                 if (masks[i] == -1ULL)
9304                         sw_stats[i] = hw;
9305                 else
9306                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
9307         }
9308 }
9309
9310 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
9311 {
9312         if (!stats->hw_stats)
9313                 return;
9314
9315         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9316                                 stats->hw_masks, stats->len / 8, false);
9317 }
9318
9319 static void bnxt_accumulate_all_stats(struct bnxt *bp)
9320 {
9321         struct bnxt_stats_mem *ring0_stats;
9322         bool ignore_zero = false;
9323         int i;
9324
9325         /* Chip bug.  Counter intermittently becomes 0. */
9326         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9327                 ignore_zero = true;
9328
9329         for (i = 0; i < bp->cp_nr_rings; i++) {
9330                 struct bnxt_napi *bnapi = bp->bnapi[i];
9331                 struct bnxt_cp_ring_info *cpr;
9332                 struct bnxt_stats_mem *stats;
9333
9334                 cpr = &bnapi->cp_ring;
9335                 stats = &cpr->stats;
9336                 if (!i)
9337                         ring0_stats = stats;
9338                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9339                                         ring0_stats->hw_masks,
9340                                         ring0_stats->len / 8, ignore_zero);
9341         }
9342         if (bp->flags & BNXT_FLAG_PORT_STATS) {
9343                 struct bnxt_stats_mem *stats = &bp->port_stats;
9344                 __le64 *hw_stats = stats->hw_stats;
9345                 u64 *sw_stats = stats->sw_stats;
9346                 u64 *masks = stats->hw_masks;
9347                 int cnt;
9348
9349                 cnt = sizeof(struct rx_port_stats) / 8;
9350                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9351
9352                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9353                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9354                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9355                 cnt = sizeof(struct tx_port_stats) / 8;
9356                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9357         }
9358         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
9359                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
9360                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
9361         }
9362 }
9363
9364 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
9365 {
9366         struct hwrm_port_qstats_input *req;
9367         struct bnxt_pf_info *pf = &bp->pf;
9368         int rc;
9369
9370         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
9371                 return 0;
9372
9373         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9374                 return -EOPNOTSUPP;
9375
9376         rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9377         if (rc)
9378                 return rc;
9379
9380         req->flags = flags;
9381         req->port_id = cpu_to_le16(pf->port_id);
9382         req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
9383                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
9384         req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9385         return hwrm_req_send(bp, req);
9386 }
9387
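/* Collect extended port statistics and, when the TX extended stats are
 * large enough to include the PFC duration counters, refresh the
 * priority-to-CoS queue mapping used to report them.
 */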
9388 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
9389 {
9390         struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
9391         struct hwrm_queue_pri2cos_qcfg_input *req_qc;
9392         struct hwrm_port_qstats_ext_output *resp_qs;
9393         struct hwrm_port_qstats_ext_input *req_qs;
9394         struct bnxt_pf_info *pf = &bp->pf;
9395         u32 tx_stat_size;
9396         int rc;
9397
9398         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
9399                 return 0;
9400
9401         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9402                 return -EOPNOTSUPP;
9403
9404         rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9405         if (rc)
9406                 return rc;
9407
9408         req_qs->flags = flags;
9409         req_qs->port_id = cpu_to_le16(pf->port_id);
9410         req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
9411         req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
9412         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
9413                        sizeof(struct tx_port_stats_ext) : 0;
9414         req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
9415         req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
9416         resp_qs = hwrm_req_hold(bp, req_qs);
9417         rc = hwrm_req_send(bp, req_qs);
9418         if (!rc) {
9419                 bp->fw_rx_stats_ext_size =
9420                         le16_to_cpu(resp_qs->rx_stat_size) / 8;
9421                 if (BNXT_FW_MAJ(bp) < 220 &&
9422                     bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
9423                         bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
9424
9425                 bp->fw_tx_stats_ext_size = tx_stat_size ?
9426                         le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
9427         } else {
9428                 bp->fw_rx_stats_ext_size = 0;
9429                 bp->fw_tx_stats_ext_size = 0;
9430         }
9431         hwrm_req_drop(bp, req_qs);
9432
9433         if (flags)
9434                 return rc;
9435
9436         if (bp->fw_tx_stats_ext_size <=
9437             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
9438                 bp->pri2cos_valid = 0;
9439                 return rc;
9440         }
9441
9442         rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9443         if (rc)
9444                 return rc;
9445
9446         req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
9447
9448         resp_qc = hwrm_req_hold(bp, req_qc);
9449         rc = hwrm_req_send(bp, req_qc);
9450         if (!rc) {
9451                 u8 *pri2cos;
9452                 int i, j;
9453
9454                 pri2cos = &resp_qc->pri0_cos_queue_id;
9455                 for (i = 0; i < 8; i++) {
9456                         u8 queue_id = pri2cos[i];
9457                         u8 queue_idx;
9458
9459                         /* Per port queue IDs start from 0, 10, 20, etc. */
9460                         queue_idx = queue_id % 10;
9461                         if (queue_idx > BNXT_MAX_QUEUE) {
9462                                 bp->pri2cos_valid = false;
9463                                 hwrm_req_drop(bp, req_qc);
9464                                 return rc;
9465                         }
9466                         for (j = 0; j < bp->max_q; j++) {
9467                                 if (bp->q_ids[j] == queue_id)
9468                                         bp->pri2cos_idx[i] = queue_idx;
9469                         }
9470                 }
9471                 bp->pri2cos_valid = true;
9472         }
9473         hwrm_req_drop(bp, req_qc);
9474
9475         return rc;
9476 }
9477
9478 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
9479 {
9480         bnxt_hwrm_tunnel_dst_port_free(bp,
9481                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9482         bnxt_hwrm_tunnel_dst_port_free(bp,
9483                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9484 }
9485
9486 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
9487 {
9488         u32 tpa_flags = 0;
9489         int rc, i;
9490
9491         if (set_tpa)
9492                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
9493         else if (BNXT_NO_FW_ACCESS(bp))
9494                 return 0;
9495         for (i = 0; i < bp->nr_vnics; i++) {
9496                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
9497                 if (rc) {
9498                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
9499                                    i, rc);
9500                         return rc;
9501                 }
9502         }
9503         return 0;
9504 }
9505
9506 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
9507 {
9508         int i;
9509
9510         for (i = 0; i < bp->nr_vnics; i++)
9511                 bnxt_hwrm_vnic_set_rss(bp, i, false);
9512 }
9513
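/* Undo all VNIC state in the proper order: L2 filters first, then
 * RSS settings and contexts, then TPA, and finally the VNICs. On
 * P5+ chips the RSS contexts are freed after the VNICs instead of
 * before them.
 */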
9514 static void bnxt_clear_vnic(struct bnxt *bp)
9515 {
9516         if (!bp->vnic_info)
9517                 return;
9518
9519         bnxt_hwrm_clear_vnic_filter(bp);
9520         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
9521                 /* clear all RSS settings before freeing the vnic ctx */
9522                 bnxt_hwrm_clear_vnic_rss(bp);
9523                 bnxt_hwrm_vnic_ctx_free(bp);
9524         }
9525         /* before freeing the vnic, undo the vnic TPA settings */
9526         if (bp->flags & BNXT_FLAG_TPA)
9527                 bnxt_set_tpa(bp, false);
9528         bnxt_hwrm_vnic_free(bp);
9529         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9530                 bnxt_hwrm_vnic_ctx_free(bp);
9531 }
9532
9533 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
9534                                     bool irq_re_init)
9535 {
9536         bnxt_clear_vnic(bp);
9537         bnxt_hwrm_ring_free(bp, close_path);
9538         bnxt_hwrm_ring_grp_free(bp);
9539         if (irq_re_init) {
9540                 bnxt_hwrm_stat_ctx_free(bp);
9541                 bnxt_hwrm_free_tunnel_ports(bp);
9542         }
9543 }
9544
9545 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
9546 {
9547         struct hwrm_func_cfg_input *req;
9548         u8 evb_mode;
9549         int rc;
9550
9551         if (br_mode == BRIDGE_MODE_VEB)
9552                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
9553         else if (br_mode == BRIDGE_MODE_VEPA)
9554                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
9555         else
9556                 return -EINVAL;
9557
9558         rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9559         if (rc)
9560                 return rc;
9561
9562         req->fid = cpu_to_le16(0xffff);
9563         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
9564         req->evb_mode = evb_mode;
9565         return hwrm_req_send(bp, req);
9566 }
9567
9568 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
9569 {
9570         struct hwrm_func_cfg_input *req;
9571         int rc;
9572
9573         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
9574                 return 0;
9575
9576         rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9577         if (rc)
9578                 return rc;
9579
9580         req->fid = cpu_to_le16(0xffff);
9581         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
9582         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
9583         if (size == 128)
9584                 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
9585
9586         return hwrm_req_send(bp, req);
9587 }
9588
9589 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
9590 {
9591         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
9592         int rc;
9593
9594         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
9595                 goto skip_rss_ctx;
9596
9597         /* allocate context for vnic */
9598         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
9599         if (rc) {
9600                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9601                            vnic_id, rc);
9602                 goto vnic_setup_err;
9603         }
9604         bp->rsscos_nr_ctxs++;
9605
9606         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9607                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
9608                 if (rc) {
9609                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
9610                                    vnic_id, rc);
9611                         goto vnic_setup_err;
9612                 }
9613                 bp->rsscos_nr_ctxs++;
9614         }
9615
9616 skip_rss_ctx:
9617         /* configure default vnic, ring grp */
9618         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9619         if (rc) {
9620                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9621                            vnic_id, rc);
9622                 goto vnic_setup_err;
9623         }
9624
9625         /* Enable RSS hashing on vnic */
9626         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
9627         if (rc) {
9628                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
9629                            vnic_id, rc);
9630                 goto vnic_setup_err;
9631         }
9632
9633         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9634                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9635                 if (rc) {
9636                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9637                                    vnic_id, rc);
9638                 }
9639         }
9640
9641 vnic_setup_err:
9642         return rc;
9643 }
9644
9645 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
9646 {
9647         int rc, i, nr_ctxs;
9648
9649         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
9650         for (i = 0; i < nr_ctxs; i++) {
9651                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
9652                 if (rc) {
9653                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
9654                                    vnic_id, i, rc);
9655                         break;
9656                 }
9657                 bp->rsscos_nr_ctxs++;
9658         }
9659         if (i < nr_ctxs)
9660                 return -ENOMEM;
9661
9662         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
9663         if (rc) {
9664                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
9665                            vnic_id, rc);
9666                 return rc;
9667         }
9668         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9669         if (rc) {
9670                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9671                            vnic_id, rc);
9672                 return rc;
9673         }
9674         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9675                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9676                 if (rc) {
9677                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9678                                    vnic_id, rc);
9679                 }
9680         }
9681         return rc;
9682 }
9683
9684 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
9685 {
9686         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9687                 return __bnxt_setup_vnic_p5(bp, vnic_id);
9688         else
9689                 return __bnxt_setup_vnic(bp, vnic_id);
9690 }
9691
9692 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
9693 {
9694         int i, rc = 0;
9695
9696         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9697                 return 0;
9698
9699         for (i = 0; i < bp->rx_nr_rings; i++) {
9700                 struct bnxt_vnic_info *vnic;
9701                 u16 vnic_id = i + 1;
9702                 u16 ring_id = i;
9703
9704                 if (vnic_id >= bp->nr_vnics)
9705                         break;
9706
9707                 vnic = &bp->vnic_info[vnic_id];
9708                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
9709                 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
9710                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
9711                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
9712                 if (rc) {
9713                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9714                                    vnic_id, rc);
9715                         break;
9716                 }
9717                 rc = bnxt_setup_vnic(bp, vnic_id);
9718                 if (rc)
9719                         break;
9720         }
9721         return rc;
9722 }
9723
9724 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
9725 static bool bnxt_promisc_ok(struct bnxt *bp)
9726 {
9727 #ifdef CONFIG_BNXT_SRIOV
9728         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
9729                 return false;
9730 #endif
9731         return true;
9732 }
9733
9734 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
9735 {
9736         int rc;
9737
9738         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
9739         if (rc) {
9740                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
9741                            rc);
9742                 return rc;
9743         }
9744
9745         rc = bnxt_hwrm_vnic_cfg(bp, 1);
9746         if (rc) {
9747                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
9748                            rc);
9749                 return rc;
9750         }
9751         return rc;
9752 }
9753
9754 static int bnxt_cfg_rx_mode(struct bnxt *);
9755 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
9756
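/* Bring the chip to an operational state: allocate stat contexts,
 * rings, ring groups and the default VNIC, install the unicast MAC
 * filter and RX mask, and program interrupt coalescing. On any
 * failure, everything is unwound via bnxt_hwrm_resource_free().
 */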
9757 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
9758 {
9759         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9760         int rc = 0;
9761         unsigned int rx_nr_rings = bp->rx_nr_rings;
9762
9763         if (irq_re_init) {
9764                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
9765                 if (rc) {
9766                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
9767                                    rc);
9768                         goto err_out;
9769                 }
9770         }
9771
9772         rc = bnxt_hwrm_ring_alloc(bp);
9773         if (rc) {
9774                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
9775                 goto err_out;
9776         }
9777
9778         rc = bnxt_hwrm_ring_grp_alloc(bp);
9779         if (rc) {
9780                 netdev_err(bp->dev, "hwrm ring grp alloc failure rc: %x\n", rc);
9781                 goto err_out;
9782         }
9783
9784         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9785                 rx_nr_rings--;
9786
9787         /* default vnic 0 */
9788         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
9789         if (rc) {
9790                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
9791                 goto err_out;
9792         }
9793
9794         if (BNXT_VF(bp))
9795                 bnxt_hwrm_func_qcfg(bp);
9796
9797         rc = bnxt_setup_vnic(bp, 0);
9798         if (rc)
9799                 goto err_out;
9800         if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
9801                 bnxt_hwrm_update_rss_hash_cfg(bp);
9802
9803         if (bp->flags & BNXT_FLAG_RFS) {
9804                 rc = bnxt_alloc_rfs_vnics(bp);
9805                 if (rc)
9806                         goto err_out;
9807         }
9808
9809         if (bp->flags & BNXT_FLAG_TPA) {
9810                 rc = bnxt_set_tpa(bp, true);
9811                 if (rc)
9812                         goto err_out;
9813         }
9814
9815         if (BNXT_VF(bp))
9816                 bnxt_update_vf_mac(bp);
9817
9818         /* Filter for default vnic 0 */
9819         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
9820         if (rc) {
9821                 if (BNXT_VF(bp) && rc == -ENODEV)
9822                         netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
9823                 else
9824                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
9825                 goto err_out;
9826         }
9827         vnic->uc_filter_count = 1;
9828
9829         vnic->rx_mask = 0;
9830         if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
9831                 goto skip_rx_mask;
9832
9833         if (bp->dev->flags & IFF_BROADCAST)
9834                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9835
9836         if (bp->dev->flags & IFF_PROMISC)
9837                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9838
9839         if (bp->dev->flags & IFF_ALLMULTI) {
9840                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9841                 vnic->mc_list_count = 0;
9842         } else if (bp->dev->flags & IFF_MULTICAST) {
9843                 u32 mask = 0;
9844
9845                 bnxt_mc_list_updated(bp, &mask);
9846                 vnic->rx_mask |= mask;
9847         }
9848
9849         rc = bnxt_cfg_rx_mode(bp);
9850         if (rc)
9851                 goto err_out;
9852
9853 skip_rx_mask:
9854         rc = bnxt_hwrm_set_coal(bp);
9855         if (rc)
9856                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
9857                                 rc);
9858
9859         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9860                 rc = bnxt_setup_nitroa0_vnic(bp);
9861                 if (rc)
9862                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
9863                                    rc);
9864         }
9865
9866         if (BNXT_VF(bp)) {
9867                 bnxt_hwrm_func_qcfg(bp);
9868                 netdev_update_features(bp->dev);
9869         }
9870
9871         return 0;
9872
9873 err_out:
9874         bnxt_hwrm_resource_free(bp, 0, true);
9875
9876         return rc;
9877 }
9878
9879 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
9880 {
9881         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
9882         return 0;
9883 }
9884
9885 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
9886 {
9887         bnxt_init_cp_rings(bp);
9888         bnxt_init_rx_rings(bp);
9889         bnxt_init_tx_rings(bp);
9890         bnxt_init_ring_grps(bp, irq_re_init);
9891         bnxt_init_vnics(bp);
9892
9893         return bnxt_init_chip(bp, irq_re_init);
9894 }
9895
9896 static int bnxt_set_real_num_queues(struct bnxt *bp)
9897 {
9898         struct net_device *dev = bp->dev;
9899         int rc;
9900
9901         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
9902                                           bp->tx_nr_rings_xdp);
9903         if (rc)
9904                 return rc;
9905
9906         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
9907         if (rc)
9908                 return rc;
9909
9910 #ifdef CONFIG_RFS_ACCEL
9911         if (bp->flags & BNXT_FLAG_RFS)
9912                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
9913 #endif
9914
9915         return rc;
9916 }
9917
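/* Fit *rx and *tx into @max completion rings. When rings are shared,
 * each count is simply capped at @max. Otherwise the larger count is
 * decremented until the sum fits; e.g. max = 6, rx = 5, tx = 4 trims
 * down to rx = 3, tx = 3.
 */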
9918 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
9919                              bool shared)
9920 {
9921         int _rx = *rx, _tx = *tx;
9922
9923         if (shared) {
9924                 *rx = min_t(int, _rx, max);
9925                 *tx = min_t(int, _tx, max);
9926         } else {
9927                 if (max < 2)
9928                         return -ENOMEM;
9929
9930                 while (_rx + _tx > max) {
9931                         if (_rx > _tx && _rx > 1)
9932                                 _rx--;
9933                         else if (_tx > 1)
9934                                 _tx--;
9935                 }
9936                 *rx = _rx;
9937                 *tx = _tx;
9938         }
9939         return 0;
9940 }
9941
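/* Convert a TX ring count to the number of completion rings needed:
 * every group of @tx_sets TX rings (one ring per TC) shares a single
 * completion ring, while each of the @tx_xdp XDP rings needs its own.
 */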
9942 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
9943 {
9944         return (tx - tx_xdp) / tx_sets + tx_xdp;
9945 }
9946
9947 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
9948 {
9949         int tcs = bp->num_tc;
9950
9951         if (!tcs)
9952                 tcs = 1;
9953         return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
9954 }
9955
9956 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
9957 {
9958         int tcs = bp->num_tc;
9959
9960         return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
9961                bp->tx_nr_rings_xdp;
9962 }
9963
9964 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
9965                            bool sh)
9966 {
9967         int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
9968
9969         if (tx_cp != *tx) {
9970                 int tx_saved = tx_cp, rc;
9971
9972                 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
9973                 if (rc)
9974                         return rc;
9975                 if (tx_cp != tx_saved)
9976                         *tx = bnxt_num_cp_to_tx(bp, tx_cp);
9977                 return 0;
9978         }
9979         return __bnxt_trim_rings(bp, rx, tx, max, sh);
9980 }
9981
9982 static void bnxt_setup_msix(struct bnxt *bp)
9983 {
9984         const int len = sizeof(bp->irq_tbl[0].name);
9985         struct net_device *dev = bp->dev;
9986         int tcs, i;
9987
9988         tcs = bp->num_tc;
9989         if (tcs) {
9990                 int off, count;
9991
9992                 for (i = 0; i < tcs; i++) {
9993                         count = bp->tx_nr_rings_per_tc;
9994                         off = BNXT_TC_TO_RING_BASE(bp, i);
9995                         netdev_set_tc_queue(dev, i, count, off);
9996                 }
9997         }
9998
9999         for (i = 0; i < bp->cp_nr_rings; i++) {
10000                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10001                 char *attr;
10002
10003                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10004                         attr = "TxRx";
10005                 else if (i < bp->rx_nr_rings)
10006                         attr = "rx";
10007                 else
10008                         attr = "tx";
10009
10010                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10011                          attr, i);
10012                 bp->irq_tbl[map_idx].handler = bnxt_msix;
10013         }
10014 }
10015
10016 static void bnxt_setup_inta(struct bnxt *bp)
10017 {
10018         const int len = sizeof(bp->irq_tbl[0].name);
10019
10020         if (bp->num_tc) {
10021                 netdev_reset_tc(bp->dev);
10022                 bp->num_tc = 0;
10023         }
10024
10025         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
10026                  0);
10027         bp->irq_tbl[0].handler = bnxt_inta;
10028 }
10029
10030 static int bnxt_init_int_mode(struct bnxt *bp);
10031
10032 static int bnxt_setup_int_mode(struct bnxt *bp)
10033 {
10034         int rc;
10035
10036         if (!bp->irq_tbl) {
10037                 rc = bnxt_init_int_mode(bp);
10038                 if (rc || !bp->irq_tbl)
10039                         return rc ?: -ENODEV;
10040         }
10041
10042         if (bp->flags & BNXT_FLAG_USING_MSIX)
10043                 bnxt_setup_msix(bp);
10044         else
10045                 bnxt_setup_inta(bp);
10046
10047         rc = bnxt_set_real_num_queues(bp);
10048         return rc;
10049 }
10050
10051 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
10052 {
10053         return bp->hw_resc.max_rsscos_ctxs;
10054 }
10055
10056 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
10057 {
10058         return bp->hw_resc.max_vnics;
10059 }
10060
10061 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
10062 {
10063         return bp->hw_resc.max_stat_ctxs;
10064 }
10065
10066 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
10067 {
10068         return bp->hw_resc.max_cp_rings;
10069 }
10070
10071 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
10072 {
10073         unsigned int cp = bp->hw_resc.max_cp_rings;
10074
10075         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10076                 cp -= bnxt_get_ulp_msix_num(bp);
10077
10078         return cp;
10079 }
10080
10081 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
10082 {
10083         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10084
10085         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10086                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
10087
10088         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
10089 }
10090
10091 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
10092 {
10093         bp->hw_resc.max_irqs = max_irqs;
10094 }
10095
10096 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
10097 {
10098         unsigned int cp;
10099
10100         cp = bnxt_get_max_func_cp_rings_for_en(bp);
10101         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10102                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
10103         else
10104                 return cp - bp->cp_nr_rings;
10105 }
10106
10107 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
10108 {
10109         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
10110 }
10111
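/* Return how many more MSI-X vectors, up to @num, can still be made
 * available beyond those already used by the existing completion
 * rings, without exceeding the function's IRQ limits.
 */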
10112 int bnxt_get_avail_msix(struct bnxt *bp, int num)
10113 {
10114         int max_cp = bnxt_get_max_func_cp_rings(bp);
10115         int max_irq = bnxt_get_max_func_irqs(bp);
10116         int total_req = bp->cp_nr_rings + num;
10117         int max_idx, avail_msix;
10118
10119         max_idx = bp->total_irqs;
10120         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10121                 max_idx = min_t(int, bp->total_irqs, max_cp);
10122         avail_msix = max_idx - bp->cp_nr_rings;
10123         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
10124                 return avail_msix;
10125
10126         if (max_irq < total_req) {
10127                 num = max_irq - bp->cp_nr_rings;
10128                 if (num <= 0)
10129                         return 0;
10130         }
10131         return num;
10132 }
10133
10134 static int bnxt_get_num_msix(struct bnxt *bp)
10135 {
10136         if (!BNXT_NEW_RM(bp))
10137                 return bnxt_get_max_func_irqs(bp);
10138
10139         return bnxt_nq_rings_in_use(bp);
10140 }
10141
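/* Enable MSI-X: request one vector per completion ring (a minimum of
 * two unless rings are shared), build the IRQ table from the vectors
 * actually granted, and trim the RX/TX ring counts to fit, leaving
 * room for any ULP (RDMA) vectors.
 */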
10142 static int bnxt_init_msix(struct bnxt *bp)
10143 {
10144         int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
10145         struct msix_entry *msix_ent;
10146
10147         total_vecs = bnxt_get_num_msix(bp);
10148         max = bnxt_get_max_func_irqs(bp);
10149         if (total_vecs > max)
10150                 total_vecs = max;
10151
10152         if (!total_vecs)
10153                 return 0;
10154
10155         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
10156         if (!msix_ent)
10157                 return -ENOMEM;
10158
10159         for (i = 0; i < total_vecs; i++) {
10160                 msix_ent[i].entry = i;
10161                 msix_ent[i].vector = 0;
10162         }
10163
10164         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
10165                 min = 2;
10166
10167         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
10168         ulp_msix = bnxt_get_ulp_msix_num(bp);
10169         if (total_vecs < 0 || total_vecs < ulp_msix) {
10170                 rc = -ENODEV;
10171                 goto msix_setup_exit;
10172         }
10173
10174         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
10175         if (bp->irq_tbl) {
10176                 for (i = 0; i < total_vecs; i++)
10177                         bp->irq_tbl[i].vector = msix_ent[i].vector;
10178
10179                 bp->total_irqs = total_vecs;
10180                 /* Trim rings based on the number of vectors allocated */
10181                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
10182                                      total_vecs - ulp_msix, min == 1);
10183                 if (rc)
10184                         goto msix_setup_exit;
10185
10186                 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
10187                 bp->cp_nr_rings = (min == 1) ?
10188                                   max_t(int, tx_cp, bp->rx_nr_rings) :
10189                                   tx_cp + bp->rx_nr_rings;
10190
10191         } else {
10192                 rc = -ENOMEM;
10193                 goto msix_setup_exit;
10194         }
10195         bp->flags |= BNXT_FLAG_USING_MSIX;
10196         kfree(msix_ent);
10197         return 0;
10198
10199 msix_setup_exit:
10200         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
10201         kfree(bp->irq_tbl);
10202         bp->irq_tbl = NULL;
10203         pci_disable_msix(bp->pdev);
10204         kfree(msix_ent);
10205         return rc;
10206 }
10207
10208 static int bnxt_init_inta(struct bnxt *bp)
10209 {
10210         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
10211         if (!bp->irq_tbl)
10212                 return -ENOMEM;
10213
10214         bp->total_irqs = 1;
10215         bp->rx_nr_rings = 1;
10216         bp->tx_nr_rings = 1;
10217         bp->cp_nr_rings = 1;
10218         bp->flags |= BNXT_FLAG_SHARED_RINGS;
10219         bp->irq_tbl[0].vector = bp->pdev->irq;
10220         return 0;
10221 }
10222
10223 static int bnxt_init_int_mode(struct bnxt *bp)
10224 {
10225         int rc = -ENODEV;
10226
10227         if (bp->flags & BNXT_FLAG_MSIX_CAP)
10228                 rc = bnxt_init_msix(bp);
10229
10230         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
10231                 /* fallback to INTA */
10232                 rc = bnxt_init_inta(bp);
10233         }
10234         return rc;
10235 }
10236
10237 static void bnxt_clear_int_mode(struct bnxt *bp)
10238 {
10239         if (bp->flags & BNXT_FLAG_USING_MSIX)
10240                 pci_disable_msix(bp->pdev);
10241
10242         kfree(bp->irq_tbl);
10243         bp->irq_tbl = NULL;
10244         bp->flags &= ~BNXT_FLAG_USING_MSIX;
10245 }
10246
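/* Reserve rings with the firmware, clearing and re-initializing the
 * IRQ vectors first if the required MSI-X count has changed. Fails
 * with -ENOMEM if the reserved TX rings can no longer cover the
 * configured traffic classes.
 */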
10247 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
10248 {
10249         bool irq_cleared = false;
10250         int tcs = bp->num_tc;
10251         int rc;
10252
10253         if (!bnxt_need_reserve_rings(bp))
10254                 return 0;
10255
10256         if (irq_re_init && BNXT_NEW_RM(bp) &&
10257             bnxt_get_num_msix(bp) != bp->total_irqs) {
10258                 bnxt_ulp_irq_stop(bp);
10259                 bnxt_clear_int_mode(bp);
10260                 irq_cleared = true;
10261         }
10262         rc = __bnxt_reserve_rings(bp);
10263         if (irq_cleared) {
10264                 if (!rc)
10265                         rc = bnxt_init_int_mode(bp);
10266                 bnxt_ulp_irq_restart(bp, rc);
10267         }
10268         if (rc) {
10269                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10270                 return rc;
10271         }
10272         if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
10273                     bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
10274                 netdev_err(bp->dev, "tx ring reservation failure\n");
10275                 netdev_reset_tc(bp->dev);
10276                 bp->num_tc = 0;
10277                 if (bp->tx_nr_rings_xdp)
10278                         bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
10279                 else
10280                         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10281                 return -ENOMEM;
10282         }
10283         return 0;
10284 }
10285
10286 static void bnxt_free_irq(struct bnxt *bp)
10287 {
10288         struct bnxt_irq *irq;
10289         int i;
10290
10291 #ifdef CONFIG_RFS_ACCEL
10292         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
10293         bp->dev->rx_cpu_rmap = NULL;
10294 #endif
10295         if (!bp->irq_tbl || !bp->bnapi)
10296                 return;
10297
10298         for (i = 0; i < bp->cp_nr_rings; i++) {
10299                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10300
10301                 irq = &bp->irq_tbl[map_idx];
10302                 if (irq->requested) {
10303                         if (irq->have_cpumask) {
10304                                 irq_set_affinity_hint(irq->vector, NULL);
10305                                 free_cpumask_var(irq->cpu_mask);
10306                                 irq->have_cpumask = 0;
10307                         }
10308                         free_irq(irq->vector, bp->bnapi[i]);
10309                 }
10310
10311                 irq->requested = 0;
10312         }
10313 }
10314
10315 static int bnxt_request_irq(struct bnxt *bp)
10316 {
10317         unsigned long flags = 0;
10318         int i, j, rc = 0;
10319 #ifdef CONFIG_RFS_ACCEL
10320         struct cpu_rmap *rmap;
10321 #endif
10322
10323         rc = bnxt_setup_int_mode(bp);
10324         if (rc) {
10325                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
10326                            rc);
10327                 return rc;
10328         }
10329 #ifdef CONFIG_RFS_ACCEL
10330         rmap = bp->dev->rx_cpu_rmap;
10331 #endif
10332         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
10333                 flags = IRQF_SHARED;
10334
10335         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
10336                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10337                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
10338
10339 #ifdef CONFIG_RFS_ACCEL
10340                 if (rmap && bp->bnapi[i]->rx_ring) {
10341                         rc = irq_cpu_rmap_add(rmap, irq->vector);
10342                         if (rc)
10343                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
10344                                             j);
10345                         j++;
10346                 }
10347 #endif
10348                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
10349                                  bp->bnapi[i]);
10350                 if (rc)
10351                         break;
10352
10353                 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
10354                 irq->requested = 1;
10355
10356                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
10357                         int numa_node = dev_to_node(&bp->pdev->dev);
10358
10359                         irq->have_cpumask = 1;
10360                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
10361                                         irq->cpu_mask);
10362                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
10363                         if (rc) {
10364                                 netdev_warn(bp->dev,
10365                                             "Set affinity failed, IRQ = %d\n",
10366                                             irq->vector);
10367                                 break;
10368                         }
10369                 }
10370         }
10371         return rc;
10372 }
10373
10374 static void bnxt_del_napi(struct bnxt *bp)
10375 {
10376         int i;
10377
10378         if (!bp->bnapi)
10379                 return;
10380
10381         for (i = 0; i < bp->rx_nr_rings; i++)
10382                 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
10383         for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
10384                 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
10385
10386         for (i = 0; i < bp->cp_nr_rings; i++) {
10387                 struct bnxt_napi *bnapi = bp->bnapi[i];
10388
10389                 __netif_napi_del(&bnapi->napi);
10390         }
10391         /* We called __netif_napi_del(), so we need to respect an RCU
10392          * grace period before freeing the napi structures.
10393          */
10394         synchronize_net();
10395 }
10396
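/* Register the NAPI poll handlers: one per completion ring in MSI-X
 * mode (with the last Nitro A0 ring using a dedicated poll function),
 * or a single handler in INTA mode.
 */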
10397 static void bnxt_init_napi(struct bnxt *bp)
10398 {
10399         unsigned int cp_nr_rings = bp->cp_nr_rings;
10400         struct bnxt_napi *bnapi;
10401         int i;
10402
10403         if (bp->flags & BNXT_FLAG_USING_MSIX) {
10404                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
10405
10406                 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10407                         poll_fn = bnxt_poll_p5;
10408                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10409                         cp_nr_rings--;
10410                 for (i = 0; i < cp_nr_rings; i++) {
10411                         bnapi = bp->bnapi[i];
10412                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
10413                 }
10414                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10415                         bnapi = bp->bnapi[cp_nr_rings];
10416                         netif_napi_add(bp->dev, &bnapi->napi,
10417                                        bnxt_poll_nitroa0);
10418                 }
10419         } else {
10420                 bnapi = bp->bnapi[0];
10421                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
10422         }
10423 }
10424
10425 static void bnxt_disable_napi(struct bnxt *bp)
10426 {
10427         int i;
10428
10429         if (!bp->bnapi ||
10430             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
10431                 return;
10432
10433         for (i = 0; i < bp->cp_nr_rings; i++) {
10434                 struct bnxt_napi *bnapi = bp->bnapi[i];
10435                 struct bnxt_cp_ring_info *cpr;
10436
10437                 cpr = &bnapi->cp_ring;
10438                 if (bnapi->tx_fault)
10439                         cpr->sw_stats.tx.tx_resets++;
10440                 if (bnapi->in_reset)
10441                         cpr->sw_stats.rx.rx_resets++;
10442                 napi_disable(&bnapi->napi);
10443                 if (bnapi->rx_ring)
10444                         cancel_work_sync(&cpr->dim.work);
10445         }
10446 }
10447
10448 static void bnxt_enable_napi(struct bnxt *bp)
10449 {
10450         int i;
10451
10452         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
10453         for (i = 0; i < bp->cp_nr_rings; i++) {
10454                 struct bnxt_napi *bnapi = bp->bnapi[i];
10455                 struct bnxt_cp_ring_info *cpr;
10456
10457                 bnapi->tx_fault = 0;
10458
10459                 cpr = &bnapi->cp_ring;
10460                 bnapi->in_reset = false;
10461
10462                 if (bnapi->rx_ring) {
10463                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
10464                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
10465                 }
10466                 napi_enable(&bnapi->napi);
10467         }
10468 }
10469
10470 void bnxt_tx_disable(struct bnxt *bp)
10471 {
10472         struct bnxt_tx_ring_info *txr;
10473         int i;
10474
10475         if (bp->tx_ring) {
10476                 for (i = 0; i < bp->tx_nr_rings; i++) {
10477                         txr = &bp->tx_ring[i];
10478                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
10479                 }
10480         }
10481         /* Make sure napi polls see @dev_state change */
10482         synchronize_net();
10483         /* Drop carrier first to prevent TX timeout */
10484         netif_carrier_off(bp->dev);
10485         /* Stop all TX queues */
10486         netif_tx_disable(bp->dev);
10487 }
10488
10489 void bnxt_tx_enable(struct bnxt *bp)
10490 {
10491         struct bnxt_tx_ring_info *txr;
10492         int i;
10493
10494         for (i = 0; i < bp->tx_nr_rings; i++) {
10495                 txr = &bp->tx_ring[i];
10496                 WRITE_ONCE(txr->dev_state, 0);
10497         }
10498         /* Make sure napi polls see @dev_state change */
10499         synchronize_net();
10500         netif_tx_wake_all_queues(bp->dev);
10501         if (BNXT_LINK_IS_UP(bp))
10502                 netif_carrier_on(bp->dev);
10503 }
10504
10505 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
10506 {
10507         u8 active_fec = link_info->active_fec_sig_mode &
10508                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
10509
10510         switch (active_fec) {
10511         default:
10512         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
10513                 return "None";
10514         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
10515                 return "Clause 74 BaseR";
10516         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
10517                 return "Clause 91 RS(528,514)";
10518         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
10519                 return "Clause 91 RS544_1XN";
10520         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
10521                 return "Clause 91 RS(544,514)";
10522         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
10523                 return "Clause 91 RS272_1XN";
10524         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
10525                 return "Clause 91 RS(272,257)";
10526         }
10527 }
10528
10529 void bnxt_report_link(struct bnxt *bp)
10530 {
10531         if (BNXT_LINK_IS_UP(bp)) {
10532                 const char *signal = "";
10533                 const char *flow_ctrl;
10534                 const char *duplex;
10535                 u32 speed;
10536                 u16 fec;
10537
10538                 netif_carrier_on(bp->dev);
10539                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
10540                 if (speed == SPEED_UNKNOWN) {
10541                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
10542                         return;
10543                 }
10544                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
10545                         duplex = "full";
10546                 else
10547                         duplex = "half";
10548                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
10549                         flow_ctrl = "ON - receive & transmit";
10550                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
10551                         flow_ctrl = "ON - transmit";
10552                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
10553                         flow_ctrl = "ON - receive";
10554                 else
10555                         flow_ctrl = "none";
10556                 if (bp->link_info.phy_qcfg_resp.option_flags &
10557                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
10558                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
10559                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
10560                         switch (sig_mode) {
10561                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
10562                                 signal = "(NRZ) ";
10563                                 break;
10564                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
10565                                 signal = "(PAM4 56Gbps) ";
10566                                 break;
10567                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
10568                                 signal = "(PAM4 112Gbps) ";
10569                                 break;
10570                         default:
10571                                 break;
10572                         }
10573                 }
10574                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
10575                             speed, signal, duplex, flow_ctrl);
10576                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
10577                         netdev_info(bp->dev, "EEE is %s\n",
10578                                     bp->eee.eee_active ? "active" :
10579                                                          "not active");
10580                 fec = bp->link_info.fec_cfg;
10581                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
10582                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
10583                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
10584                                     bnxt_report_fec(&bp->link_info));
10585         } else {
10586                 netif_carrier_off(bp->dev);
10587                 netdev_err(bp->dev, "NIC Link is Down\n");
10588         }
10589 }
10590
10591 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
10592 {
10593         if (!resp->supported_speeds_auto_mode &&
10594             !resp->supported_speeds_force_mode &&
10595             !resp->supported_pam4_speeds_auto_mode &&
10596             !resp->supported_pam4_speeds_force_mode &&
10597             !resp->supported_speeds2_auto_mode &&
10598             !resp->supported_speeds2_force_mode)
10599                 return true;
10600         return false;
10601 }
10602
10603 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
10604 {
10605         struct bnxt_link_info *link_info = &bp->link_info;
10606         struct hwrm_port_phy_qcaps_output *resp;
10607         struct hwrm_port_phy_qcaps_input *req;
10608         int rc = 0;
10609
10610         if (bp->hwrm_spec_code < 0x10201)
10611                 return 0;
10612
10613         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
10614         if (rc)
10615                 return rc;
10616
10617         resp = hwrm_req_hold(bp, req);
10618         rc = hwrm_req_send(bp, req);
10619         if (rc)
10620                 goto hwrm_phy_qcaps_exit;
10621
10622         bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
10623         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
10624                 struct ethtool_keee *eee = &bp->eee;
10625                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
10626
10627                 eee->supported_u32 = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
10628                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
10629                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
10630                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
10631                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
10632         }
10633
10634         if (bp->hwrm_spec_code >= 0x10a01) {
10635                 if (bnxt_phy_qcaps_no_speed(resp)) {
10636                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
10637                         netdev_warn(bp->dev, "Ethernet link disabled\n");
10638                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
10639                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
10640                         netdev_info(bp->dev, "Ethernet link enabled\n");
10641                         /* Phy re-enabled, reprobe the speeds */
10642                         link_info->support_auto_speeds = 0;
10643                         link_info->support_pam4_auto_speeds = 0;
10644                         link_info->support_auto_speeds2 = 0;
10645                 }
10646         }
10647         if (resp->supported_speeds_auto_mode)
10648                 link_info->support_auto_speeds =
10649                         le16_to_cpu(resp->supported_speeds_auto_mode);
10650         if (resp->supported_pam4_speeds_auto_mode)
10651                 link_info->support_pam4_auto_speeds =
10652                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
10653         if (resp->supported_speeds2_auto_mode)
10654                 link_info->support_auto_speeds2 =
10655                         le16_to_cpu(resp->supported_speeds2_auto_mode);
10656
10657         bp->port_count = resp->port_cnt;
10658
10659 hwrm_phy_qcaps_exit:
10660         hwrm_req_drop(bp, req);
10661         return rc;
10662 }
10663
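/* Return true if @advertising contains any speed bits not present in
 * @supported. The XOR isolates the differing bits; OR-ing them back
 * into @supported only changes it if @advertising had a bit outside
 * @supported.
 */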
10664 static bool bnxt_support_dropped(u16 advertising, u16 supported)
10665 {
10666         u16 diff = advertising ^ supported;
10667
10668         return ((supported | diff) != supported);
10669 }
10670
10671 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
10672 {
10673         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
10674
10675         /* Check if any advertised speeds are no longer supported. The caller
10676          * holds the link_lock mutex, so we can modify link_info settings.
10677          */
10678         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
10679                 if (bnxt_support_dropped(link_info->advertising,
10680                                          link_info->support_auto_speeds2)) {
10681                         link_info->advertising = link_info->support_auto_speeds2;
10682                         return true;
10683                 }
10684                 return false;
10685         }
10686         if (bnxt_support_dropped(link_info->advertising,
10687                                  link_info->support_auto_speeds)) {
10688                 link_info->advertising = link_info->support_auto_speeds;
10689                 return true;
10690         }
10691         if (bnxt_support_dropped(link_info->advertising_pam4,
10692                                  link_info->support_pam4_auto_speeds)) {
10693                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
10694                 return true;
10695         }
10696         return false;
10697 }
10698
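/* Refresh bp->link_info from a PORT_PHY_QCFG query. If
 * @chng_link_state is set, update the cached link state and report
 * any change; otherwise the state is left as link down. Also
 * re-applies the link settings if previously advertised speeds are
 * no longer supported.
 */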
10699 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
10700 {
10701         struct bnxt_link_info *link_info = &bp->link_info;
10702         struct hwrm_port_phy_qcfg_output *resp;
10703         struct hwrm_port_phy_qcfg_input *req;
10704         u8 link_state = link_info->link_state;
10705         bool support_changed;
10706         int rc;
10707
10708         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
10709         if (rc)
10710                 return rc;
10711
10712         resp = hwrm_req_hold(bp, req);
10713         rc = hwrm_req_send(bp, req);
10714         if (rc) {
10715                 hwrm_req_drop(bp, req);
10716                 if (BNXT_VF(bp) && rc == -ENODEV) {
10717                         netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
10718                         rc = 0;
10719                 }
10720                 return rc;
10721         }
10722
10723         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
10724         link_info->phy_link_status = resp->link;
10725         link_info->duplex = resp->duplex_cfg;
10726         if (bp->hwrm_spec_code >= 0x10800)
10727                 link_info->duplex = resp->duplex_state;
10728         link_info->pause = resp->pause;
10729         link_info->auto_mode = resp->auto_mode;
10730         link_info->auto_pause_setting = resp->auto_pause;
10731         link_info->lp_pause = resp->link_partner_adv_pause;
10732         link_info->force_pause_setting = resp->force_pause;
10733         link_info->duplex_setting = resp->duplex_cfg;
10734         if (link_info->phy_link_status == BNXT_LINK_LINK) {
10735                 link_info->link_speed = le16_to_cpu(resp->link_speed);
10736                 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
10737                         link_info->active_lanes = resp->active_lanes;
10738         } else {
10739                 link_info->link_speed = 0;
10740                 link_info->active_lanes = 0;
10741         }
10742         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
10743         link_info->force_pam4_link_speed =
10744                 le16_to_cpu(resp->force_pam4_link_speed);
10745         link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
10746         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
10747         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
10748         link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
10749         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
10750         link_info->auto_pam4_link_speeds =
10751                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
10752         link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
10753         link_info->lp_auto_link_speeds =
10754                 le16_to_cpu(resp->link_partner_adv_speeds);
10755         link_info->lp_auto_pam4_link_speeds =
10756                 resp->link_partner_pam4_adv_speeds;
10757         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
10758         link_info->phy_ver[0] = resp->phy_maj;
10759         link_info->phy_ver[1] = resp->phy_min;
10760         link_info->phy_ver[2] = resp->phy_bld;
10761         link_info->media_type = resp->media_type;
10762         link_info->phy_type = resp->phy_type;
10763         link_info->transceiver = resp->xcvr_pkg_type;
10764         link_info->phy_addr = resp->eee_config_phy_addr &
10765                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
10766         link_info->module_status = resp->module_status;
10767
10768         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
10769                 struct ethtool_keee *eee = &bp->eee;
10770                 u16 fw_speeds;
10771
10772                 eee->eee_active = 0;
10773                 if (resp->eee_config_phy_addr &
10774                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
10775                         eee->eee_active = 1;
10776                         fw_speeds = le16_to_cpu(
10777                                 resp->link_partner_adv_eee_link_speed_mask);
10778                         eee->lp_advertised_u32 =
10779                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
10780                 }
10781
10782                 /* Pull initial EEE config */
10783                 if (!chng_link_state) {
10784                         if (resp->eee_config_phy_addr &
10785                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
10786                                 eee->eee_enabled = 1;
10787
10788                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
10789                         eee->advertised_u32 =
10790                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
10791
10792                         if (resp->eee_config_phy_addr &
10793                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
10794                                 __le32 tmr;
10795
10796                                 eee->tx_lpi_enabled = 1;
10797                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
10798                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
10799                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
10800                         }
10801                 }
10802         }
10803
10804         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
10805         if (bp->hwrm_spec_code >= 0x10504) {
10806                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
10807                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
10808         }
10809         /* TODO: need to add more logic to report VF link */
10810         if (chng_link_state) {
10811                 if (link_info->phy_link_status == BNXT_LINK_LINK)
10812                         link_info->link_state = BNXT_LINK_STATE_UP;
10813                 else
10814                         link_info->link_state = BNXT_LINK_STATE_DOWN;
10815                 if (link_state != link_info->link_state)
10816                         bnxt_report_link(bp);
10817         } else {
10818                 /* always report link down if not required to update link state */
10819                 link_info->link_state = BNXT_LINK_STATE_DOWN;
10820         }
10821         hwrm_req_drop(bp, req);
10822
10823         if (!BNXT_PHY_CFG_ABLE(bp))
10824                 return 0;
10825
10826         support_changed = bnxt_support_speed_dropped(link_info);
10827         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
10828                 bnxt_hwrm_set_link_setting(bp, true, false);
10829         return 0;
10830 }
10831
10832 static void bnxt_get_port_module_status(struct bnxt *bp)
10833 {
10834         struct bnxt_link_info *link_info = &bp->link_info;
10835         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
10836         u8 module_status;
10837
10838         if (bnxt_update_link(bp, true))
10839                 return;
10840
10841         module_status = link_info->module_status;
10842         switch (module_status) {
10843         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
10844         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
10845         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
10846                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
10847                             bp->pf.port_id);
10848                 if (bp->hwrm_spec_code >= 0x10201) {
10849                         netdev_warn(bp->dev, "Module part number %s\n",
10850                                     resp->phy_vendor_partnumber);
10851                 }
10852                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
10853                         netdev_warn(bp->dev, "TX is disabled\n");
10854                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
10855                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
10856         }
10857 }
10858
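/* Fill in the pause fields of a PORT_PHY_CFG request: advertise the
 * requested RX/TX pause when flow control autoneg is on, otherwise
 * force it (mirroring the forced setting into auto_pause on firmware
 * spec 0x10201 and newer).
 */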
10859 static void
10860 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
10861 {
10862         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
10863                 if (bp->hwrm_spec_code >= 0x10201)
10864                         req->auto_pause =
10865                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
10866                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
10867                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
10868                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
10869                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
10870                 req->enables |=
10871                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
10872         } else {
10873                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
10874                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
10875                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
10876                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
10877                 req->enables |=
10878                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
10879                 if (bp->hwrm_spec_code >= 0x10201) {
10880                         req->auto_pause = req->force_pause;
10881                         req->enables |= cpu_to_le32(
10882                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
10883                 }
10884         }
10885 }
10886
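      /* Program the speed-related fields of HWRM_PORT_PHY_CFG.  With
       * autoneg enabled, advertise through the speeds2 mask on
       * SPEEDS2-capable PHYs, otherwise through the legacy NRZ mask plus
       * an optional PAM4 mask.  Without autoneg, force a single speed
       * through the matching force_link_* field instead.
       */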
10887 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
10888 {
10889         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
10890                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
10891                 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
10892                         req->enables |=
10893                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
10894                         req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
10895                 } else if (bp->link_info.advertising) {
10896                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
10897                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
10898                 }
10899                 if (bp->link_info.advertising_pam4) {
10900                         req->enables |=
10901                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
10902                         req->auto_link_pam4_speed_mask =
10903                                 cpu_to_le16(bp->link_info.advertising_pam4);
10904                 }
10905                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
10906                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
10907         } else {
10908                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
10909                 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
10910                         req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
10911                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
10912                         netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
10913                                    (u32)bp->link_info.req_link_speed);
10914                 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
10915                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
10916                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
10917                 } else {
10918                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
10919                 }
10920         }
10921
10922         /* tell the ChiMP firmware that the setting takes effect immediately */
10923         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
10924 }
10925
10926 int bnxt_hwrm_set_pause(struct bnxt *bp)
10927 {
10928         struct hwrm_port_phy_cfg_input *req;
10929         int rc;
10930
10931         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
10932         if (rc)
10933                 return rc;
10934
10935         bnxt_hwrm_set_pause_common(bp, req);
10936
10937         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
10938             bp->link_info.force_link_chng)
10939                 bnxt_hwrm_set_link_common(bp, req);
10940
10941         rc = hwrm_req_send(bp, req);
10942         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
10943         /* Since changing the pause setting doesn't trigger any link
10944          * change event, the driver needs to update the current pause
10945          * result upon successful return of the phy_cfg command.
10946          */
10947                 bp->link_info.pause =
10948                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
10949                 bp->link_info.auto_pause_setting = 0;
10950                 if (!bp->link_info.force_link_chng)
10951                         bnxt_report_link(bp);
10952         }
10953         bp->link_info.force_link_chng = false;
10954         return rc;
10955 }
10956
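      /* Translate the ethtool EEE state into HWRM_PORT_PHY_CFG fields:
       * enable or disable EEE, select the TX LPI behavior and timer, and
       * convert the advertised EEE modes into a firmware speed mask.
       */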
10957 static void bnxt_hwrm_set_eee(struct bnxt *bp,
10958                               struct hwrm_port_phy_cfg_input *req)
10959 {
10960         struct ethtool_keee *eee = &bp->eee;
10961
10962         if (eee->eee_enabled) {
10963                 u16 eee_speeds;
10964                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
10965
10966                 if (eee->tx_lpi_enabled)
10967                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
10968                 else
10969                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
10970
10971                 req->flags |= cpu_to_le32(flags);
10972                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised_u32);
10973                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
10974                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
10975         } else {
10976                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
10977         }
10978 }
10979
10980 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
10981 {
10982         struct hwrm_port_phy_cfg_input *req;
10983         int rc;
10984
10985         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
10986         if (rc)
10987                 return rc;
10988
10989         if (set_pause)
10990                 bnxt_hwrm_set_pause_common(bp, req);
10991
10992         bnxt_hwrm_set_link_common(bp, req);
10993
10994         if (set_eee)
10995                 bnxt_hwrm_set_eee(bp, req);
10996         return hwrm_req_send(bp, req);
10997 }
10998
10999 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11000 {
11001         struct hwrm_port_phy_cfg_input *req;
11002         int rc;
11003
11004         if (!BNXT_SINGLE_PF(bp))
11005                 return 0;
11006
11007         if (pci_num_vf(bp->pdev) &&
11008             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11009                 return 0;
11010
11011         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11012         if (rc)
11013                 return rc;
11014
11015         req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
11016         rc = hwrm_req_send(bp, req);
11017         if (!rc) {
11018                 mutex_lock(&bp->link_lock);
11019                 /* The device is not obliged to link down in certain scenarios,
11020                  * even when forced.  Setting the state to unknown is consistent
11021                  * with driver startup and will force the link state to be
11022                  * reported during the subsequent open based on PORT_PHY_QCFG.
11023                  */
11024                 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
11025                 mutex_unlock(&bp->link_lock);
11026         }
11027         return rc;
11028 }
11029
11030 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11031 {
11032 #ifdef CONFIG_TEE_BNXT_FW
11033         int rc = tee_bnxt_fw_load();
11034
11035         if (rc)
11036                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11037
11038         return rc;
11039 #else
11040         netdev_err(bp->dev, "OP-TEE not supported\n");
11041         return -ENODEV;
11042 #endif
11043 }
11044
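      /* Poll the firmware health register until the firmware is no longer
       * booting or recovering.  If it crashed with no master function left
       * to reset it, fall back to a firmware reset via OP-TEE.
       */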
11045 static int bnxt_try_recover_fw(struct bnxt *bp)
11046 {
11047         if (bp->fw_health && bp->fw_health->status_reliable) {
11048                 int retry = 0, rc;
11049                 u32 sts;
11050
11051                 do {
11052                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11053                         rc = bnxt_hwrm_poll(bp);
11054                         if (!BNXT_FW_IS_BOOTING(sts) &&
11055                             !BNXT_FW_IS_RECOVERING(sts))
11056                                 break;
11057                         retry++;
11058                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
11059
11060                 if (!BNXT_FW_IS_HEALTHY(sts)) {
11061                         netdev_err(bp->dev,
11062                                    "Firmware not responding, status: 0x%x\n",
11063                                    sts);
11064                         rc = -ENODEV;
11065                 }
11066                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11067                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11068                         return bnxt_fw_reset_via_optee(bp);
11069                 }
11070                 return rc;
11071         }
11072
11073         return -ENODEV;
11074 }
11075
11076 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
11077 {
11078         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11079
11080         if (!BNXT_NEW_RM(bp))
11081                 return; /* no resource reservations required */
11082
11083         hw_resc->resv_cp_rings = 0;
11084         hw_resc->resv_stat_ctxs = 0;
11085         hw_resc->resv_irqs = 0;
11086         hw_resc->resv_tx_rings = 0;
11087         hw_resc->resv_rx_rings = 0;
11088         hw_resc->resv_hw_ring_grps = 0;
11089         hw_resc->resv_vnics = 0;
11090         if (!fw_reset) {
11091                 bp->tx_nr_rings = 0;
11092                 bp->rx_nr_rings = 0;
11093         }
11094 }
11095
11096 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
11097 {
11098         int rc;
11099
11100         if (!BNXT_NEW_RM(bp))
11101                 return 0; /* no resource reservations required */
11102
11103         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
11104         if (rc)
11105                 netdev_err(bp->dev, "resc_qcaps failed\n");
11106
11107         bnxt_clear_reservations(bp, fw_reset);
11108
11109         return rc;
11110 }
11111
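      /* Tell the firmware that the interface is going up or down.  On the
       * up transition, the response flags indicate whether the firmware was
       * reset or resources changed while the interface was down; if so,
       * re-init firmware state, interrupt mode and ring reservations before
       * the open proceeds.
       */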
11112 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
11113 {
11114         struct hwrm_func_drv_if_change_output *resp;
11115         struct hwrm_func_drv_if_change_input *req;
11116         bool fw_reset = !bp->irq_tbl;
11117         bool resc_reinit = false;
11118         int rc, retry = 0;
11119         u32 flags = 0;
11120
11121         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
11122                 return 0;
11123
11124         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
11125         if (rc)
11126                 return rc;
11127
11128         if (up)
11129                 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
11130         resp = hwrm_req_hold(bp, req);
11131
11132         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
11133         while (retry < BNXT_FW_IF_RETRY) {
11134                 rc = hwrm_req_send(bp, req);
11135                 if (rc != -EAGAIN)
11136                         break;
11137
11138                 msleep(50);
11139                 retry++;
11140         }
11141
11142         if (rc == -EAGAIN) {
11143                 hwrm_req_drop(bp, req);
11144                 return rc;
11145         } else if (!rc) {
11146                 flags = le32_to_cpu(resp->flags);
11147         } else if (up) {
11148                 rc = bnxt_try_recover_fw(bp);
11149                 fw_reset = true;
11150         }
11151         hwrm_req_drop(bp, req);
11152         if (rc)
11153                 return rc;
11154
11155         if (!up) {
11156                 bnxt_inv_fw_health_reg(bp);
11157                 return 0;
11158         }
11159
11160         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
11161                 resc_reinit = true;
11162         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
11163             test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
11164                 fw_reset = true;
11165         else
11166                 bnxt_remap_fw_health_regs(bp);
11167
11168         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
11169                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
11170                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11171                 return -ENODEV;
11172         }
11173         if (resc_reinit || fw_reset) {
11174                 if (fw_reset) {
11175                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11176                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11177                                 bnxt_ulp_stop(bp);
11178                         bnxt_free_ctx_mem(bp);
11179                         bnxt_dcb_free(bp);
11180                         rc = bnxt_fw_init_one(bp);
11181                         if (rc) {
11182                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11183                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11184                                 return rc;
11185                         }
11186                         bnxt_clear_int_mode(bp);
11187                         rc = bnxt_init_int_mode(bp);
11188                         if (rc) {
11189                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11190                                 netdev_err(bp->dev, "init int mode failed\n");
11191                                 return rc;
11192                         }
11193                 }
11194                 rc = bnxt_cancel_reservations(bp, fw_reset);
11195         }
11196         return rc;
11197 }
11198
11199 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
11200 {
11201         struct hwrm_port_led_qcaps_output *resp;
11202         struct hwrm_port_led_qcaps_input *req;
11203         struct bnxt_pf_info *pf = &bp->pf;
11204         int rc;
11205
11206         bp->num_leds = 0;
11207         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
11208                 return 0;
11209
11210         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
11211         if (rc)
11212                 return rc;
11213
11214         req->port_id = cpu_to_le16(pf->port_id);
11215         resp = hwrm_req_hold(bp, req);
11216         rc = hwrm_req_send(bp, req);
11217         if (rc) {
11218                 hwrm_req_drop(bp, req);
11219                 return rc;
11220         }
11221         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
11222                 int i;
11223
11224                 bp->num_leds = resp->num_leds;
11225                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
11226                                                  bp->num_leds);
11227                 for (i = 0; i < bp->num_leds; i++) {
11228                         struct bnxt_led_info *led = &bp->leds[i];
11229                         __le16 caps = led->led_state_caps;
11230
11231                         if (!led->led_group_id ||
11232                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
11233                                 bp->num_leds = 0;
11234                                 break;
11235                         }
11236                 }
11237         }
11238         hwrm_req_drop(bp, req);
11239         return 0;
11240 }
11241
11242 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
11243 {
11244         struct hwrm_wol_filter_alloc_output *resp;
11245         struct hwrm_wol_filter_alloc_input *req;
11246         int rc;
11247
11248         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
11249         if (rc)
11250                 return rc;
11251
11252         req->port_id = cpu_to_le16(bp->pf.port_id);
11253         req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
11254         req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
11255         memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
11256
11257         resp = hwrm_req_hold(bp, req);
11258         rc = hwrm_req_send(bp, req);
11259         if (!rc)
11260                 bp->wol_filter_id = resp->wol_filter_id;
11261         hwrm_req_drop(bp, req);
11262         return rc;
11263 }
11264
11265 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
11266 {
11267         struct hwrm_wol_filter_free_input *req;
11268         int rc;
11269
11270         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
11271         if (rc)
11272                 return rc;
11273
11274         req->port_id = cpu_to_le16(bp->pf.port_id);
11275         req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
11276         req->wol_filter_id = bp->wol_filter_id;
11277
11278         return hwrm_req_send(bp, req);
11279 }
11280
11281 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
11282 {
11283         struct hwrm_wol_filter_qcfg_output *resp;
11284         struct hwrm_wol_filter_qcfg_input *req;
11285         u16 next_handle = 0;
11286         int rc;
11287
11288         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
11289         if (rc)
11290                 return rc;
11291
11292         req->port_id = cpu_to_le16(bp->pf.port_id);
11293         req->handle = cpu_to_le16(handle);
11294         resp = hwrm_req_hold(bp, req);
11295         rc = hwrm_req_send(bp, req);
11296         if (!rc) {
11297                 next_handle = le16_to_cpu(resp->next_handle);
11298                 if (next_handle != 0) {
11299                         if (resp->wol_type ==
11300                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
11301                                 bp->wol = 1;
11302                                 bp->wol_filter_id = resp->wol_filter_id;
11303                         }
11304                 }
11305         }
11306         hwrm_req_drop(bp, req);
11307         return next_handle;
11308 }
11309
11310 static void bnxt_get_wol_settings(struct bnxt *bp)
11311 {
11312         u16 handle = 0;
11313
11314         bp->wol = 0;
11315         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
11316                 return;
11317
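              /* Walk the firmware's WOL filter list; each WOL_FILTER_QCFG
               * call returns the next handle, and a handle of 0 or 0xffff
               * terminates the walk.
               */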
11318         do {
11319                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
11320         } while (handle && handle != 0xffff);
11321 }
11322
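      /* Check that the EEE configuration is consistent: EEE requires
       * autoneg, and the advertised EEE modes must be a subset of the
       * advertised link modes.  Returns false (after fixing up the EEE
       * state) if the PHY needs to be reconfigured.
       */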
11323 static bool bnxt_eee_config_ok(struct bnxt *bp)
11324 {
11325         struct ethtool_keee *eee = &bp->eee;
11326         struct bnxt_link_info *link_info = &bp->link_info;
11327
11328         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
11329                 return true;
11330
11331         if (eee->eee_enabled) {
11332                 u32 advertising =
11333                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
11334
11335                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11336                         eee->eee_enabled = 0;
11337                         return false;
11338                 }
11339                 if (eee->advertised_u32 & ~advertising) {
11340                         eee->advertised_u32 = advertising & eee->supported_u32;
11341                         return false;
11342                 }
11343         }
11344         return true;
11345 }
11346
11347 static int bnxt_update_phy_setting(struct bnxt *bp)
11348 {
11349         int rc;
11350         bool update_link = false;
11351         bool update_pause = false;
11352         bool update_eee = false;
11353         struct bnxt_link_info *link_info = &bp->link_info;
11354
11355         rc = bnxt_update_link(bp, true);
11356         if (rc) {
11357                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
11358                            rc);
11359                 return rc;
11360         }
11361         if (!BNXT_SINGLE_PF(bp))
11362                 return 0;
11363
11364         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11365             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
11366             link_info->req_flow_ctrl)
11367                 update_pause = true;
11368         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11369             link_info->force_pause_setting != link_info->req_flow_ctrl)
11370                 update_pause = true;
11371         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11372                 if (BNXT_AUTO_MODE(link_info->auto_mode))
11373                         update_link = true;
11374                 if (bnxt_force_speed_updated(link_info))
11375                         update_link = true;
11376                 if (link_info->req_duplex != link_info->duplex_setting)
11377                         update_link = true;
11378         } else {
11379                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
11380                         update_link = true;
11381                 if (bnxt_auto_speed_updated(link_info))
11382                         update_link = true;
11383         }
11384
11385         /* The last close may have shut down the link, so we need to call
11386          * PHY_CFG to bring it back up.
11387          */
11388         if (!BNXT_LINK_IS_UP(bp))
11389                 update_link = true;
11390
11391         if (!bnxt_eee_config_ok(bp))
11392                 update_eee = true;
11393
11394         if (update_link)
11395                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
11396         else if (update_pause)
11397                 rc = bnxt_hwrm_set_pause(bp);
11398         if (rc) {
11399                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
11400                            rc);
11401                 return rc;
11402         }
11403
11404         return rc;
11405 }
11406
11407 /* Common routine to pre-map certain register blocks to different GRC windows.
11408  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
11409  * in the PF and 3 windows in the VF can be customized to map different
11410  * register blocks.
11411  */
11412 static void bnxt_preset_reg_win(struct bnxt *bp)
11413 {
11414         if (BNXT_PF(bp)) {
11415                 /* CAG registers map to GRC window #4 */
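                      /* The window base registers are 4 bytes apart.
                       * Assuming the register at offset 0 controls window #1
                       * (one window is fixed, per the comment above), offset
                       * (4 - 1) * 4 = 12 selects window #4.
                       */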
11416                 writel(BNXT_CAG_REG_BASE,
11417                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
11418         }
11419 }
11420
11421 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
11422
11423 static int bnxt_reinit_after_abort(struct bnxt *bp)
11424 {
11425         int rc;
11426
11427         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11428                 return -EBUSY;
11429
11430         if (bp->dev->reg_state == NETREG_UNREGISTERED)
11431                 return -ENODEV;
11432
11433         rc = bnxt_fw_init_one(bp);
11434         if (!rc) {
11435                 bnxt_clear_int_mode(bp);
11436                 rc = bnxt_init_int_mode(bp);
11437                 if (!rc) {
11438                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11439                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11440                 }
11441         }
11442         return rc;
11443 }
11444
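      /* Full open path, called with rtnl_lock held: reserve rings, allocate
       * memory and IRQs, initialize the NIC, then enable NAPI, interrupts
       * and the TX queues.  Anything allocated here is freed on failure.
       */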
11445 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11446 {
11447         int rc = 0;
11448
11449         bnxt_preset_reg_win(bp);
11450         netif_carrier_off(bp->dev);
11451         if (irq_re_init) {
11452                 /* Reserve rings now if none were reserved at driver probe. */
11453                 rc = bnxt_init_dflt_ring_mode(bp);
11454                 if (rc) {
11455                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
11456                         return rc;
11457                 }
11458         }
11459         rc = bnxt_reserve_rings(bp, irq_re_init);
11460         if (rc)
11461                 return rc;
11462         if ((bp->flags & BNXT_FLAG_RFS) &&
11463             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
11464                 /* disable RFS if falling back to INTA */
11465                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
11466                 bp->flags &= ~BNXT_FLAG_RFS;
11467         }
11468
11469         rc = bnxt_alloc_mem(bp, irq_re_init);
11470         if (rc) {
11471                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11472                 goto open_err_free_mem;
11473         }
11474
11475         if (irq_re_init) {
11476                 bnxt_init_napi(bp);
11477                 rc = bnxt_request_irq(bp);
11478                 if (rc) {
11479                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
11480                         goto open_err_irq;
11481                 }
11482         }
11483
11484         rc = bnxt_init_nic(bp, irq_re_init);
11485         if (rc) {
11486                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11487                 goto open_err_irq;
11488         }
11489
11490         bnxt_enable_napi(bp);
11491         bnxt_debug_dev_init(bp);
11492
11493         if (link_re_init) {
11494                 mutex_lock(&bp->link_lock);
11495                 rc = bnxt_update_phy_setting(bp);
11496                 mutex_unlock(&bp->link_lock);
11497                 if (rc) {
11498                         netdev_warn(bp->dev, "failed to update phy settings\n");
11499                         if (BNXT_SINGLE_PF(bp)) {
11500                                 bp->link_info.phy_retry = true;
11501                                 bp->link_info.phy_retry_expires =
11502                                         jiffies + 5 * HZ;
11503                         }
11504                 }
11505         }
11506
11507         if (irq_re_init)
11508                 udp_tunnel_nic_reset_ntf(bp->dev);
11509
11510         if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
11511                 if (!static_key_enabled(&bnxt_xdp_locking_key))
11512                         static_branch_enable(&bnxt_xdp_locking_key);
11513         } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
11514                 static_branch_disable(&bnxt_xdp_locking_key);
11515         }
11516         set_bit(BNXT_STATE_OPEN, &bp->state);
11517         bnxt_enable_int(bp);
11518         /* Enable TX queues */
11519         bnxt_tx_enable(bp);
11520         mod_timer(&bp->timer, jiffies + bp->current_interval);
11521         /* Poll link status and check for SFP+ module status */
11522         mutex_lock(&bp->link_lock);
11523         bnxt_get_port_module_status(bp);
11524         mutex_unlock(&bp->link_lock);
11525
11526         /* VF-reps may need to be re-opened after the PF is re-opened */
11527         if (BNXT_PF(bp))
11528                 bnxt_vf_reps_open(bp);
11529         bnxt_ptp_init_rtc(bp, true);
11530         bnxt_ptp_cfg_tstamp_filters(bp);
11531         return 0;
11532
11533 open_err_irq:
11534         bnxt_del_napi(bp);
11535
11536 open_err_free_mem:
11537         bnxt_free_skbs(bp);
11538         bnxt_free_irq(bp);
11539         bnxt_free_mem(bp, true);
11540         return rc;
11541 }
11542
11543 /* rtnl_lock held */
11544 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11545 {
11546         int rc = 0;
11547
11548         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
11549                 rc = -EIO;
11550         if (!rc)
11551                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
11552         if (rc) {
11553                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
11554                 dev_close(bp->dev);
11555         }
11556         return rc;
11557 }
11558
11559 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
11560  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
11561  * self-tests.
11562  */
11563 int bnxt_half_open_nic(struct bnxt *bp)
11564 {
11565         int rc = 0;
11566
11567         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11568                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
11569                 rc = -ENODEV;
11570                 goto half_open_err;
11571         }
11572
11573         rc = bnxt_alloc_mem(bp, true);
11574         if (rc) {
11575                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11576                 goto half_open_err;
11577         }
11578         bnxt_init_napi(bp);
11579         set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11580         rc = bnxt_init_nic(bp, true);
11581         if (rc) {
11582                 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11583                 bnxt_del_napi(bp);
11584                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11585                 goto half_open_err;
11586         }
11587         return 0;
11588
11589 half_open_err:
11590         bnxt_free_skbs(bp);
11591         bnxt_free_mem(bp, true);
11592         dev_close(bp->dev);
11593         return rc;
11594 }
11595
11596 /* rtnl_lock held, this call can only be made after a previous successful
11597  * call to bnxt_half_open_nic().
11598  */
11599 void bnxt_half_close_nic(struct bnxt *bp)
11600 {
11601         bnxt_hwrm_resource_free(bp, false, true);
11602         bnxt_del_napi(bp);
11603         bnxt_free_skbs(bp);
11604         bnxt_free_mem(bp, true);
11605         clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11606 }
11607
11608 void bnxt_reenable_sriov(struct bnxt *bp)
11609 {
11610         if (BNXT_PF(bp)) {
11611                 struct bnxt_pf_info *pf = &bp->pf;
11612                 int n = pf->active_vfs;
11613
11614                 if (n)
11615                         bnxt_cfg_hw_sriov(bp, &n, true);
11616         }
11617 }
11618
11619 static int bnxt_open(struct net_device *dev)
11620 {
11621         struct bnxt *bp = netdev_priv(dev);
11622         int rc;
11623
11624         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11625                 rc = bnxt_reinit_after_abort(bp);
11626                 if (rc) {
11627                         if (rc == -EBUSY)
11628                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
11629                         else
11630                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
11631                         return -ENODEV;
11632                 }
11633         }
11634
11635         rc = bnxt_hwrm_if_change(bp, true);
11636         if (rc)
11637                 return rc;
11638
11639         rc = __bnxt_open_nic(bp, true, true);
11640         if (rc) {
11641                 bnxt_hwrm_if_change(bp, false);
11642         } else {
11643                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
11644                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11645                                 bnxt_ulp_start(bp, 0);
11646                                 bnxt_reenable_sriov(bp);
11647                         }
11648                 }
11649         }
11650
11651         return rc;
11652 }
11653
11654 static bool bnxt_drv_busy(struct bnxt *bp)
11655 {
11656         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
11657                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
11658 }
11659
11660 static void bnxt_get_ring_stats(struct bnxt *bp,
11661                                 struct rtnl_link_stats64 *stats);
11662
11663 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
11664                              bool link_re_init)
11665 {
11666         /* Close the VF-reps before closing PF */
11667         if (BNXT_PF(bp))
11668                 bnxt_vf_reps_close(bp);
11669
11670         /* Change device state to avoid TX queue wake-ups */
11671         bnxt_tx_disable(bp);
11672
11673         clear_bit(BNXT_STATE_OPEN, &bp->state);
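              /* Order the clearing of BNXT_STATE_OPEN against the
               * BNXT_STATE_READ_STATS check in bnxt_drv_busy() below;
               * pairs with the barrier in bnxt_get_stats64().
               */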
11674         smp_mb__after_atomic();
11675         while (bnxt_drv_busy(bp))
11676                 msleep(20);
11677
11678         /* Flush rings and disable interrupts */
11679         bnxt_shutdown_nic(bp, irq_re_init);
11680
11681         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
11682
11683         bnxt_debug_dev_exit(bp);
11684         bnxt_disable_napi(bp);
11685         del_timer_sync(&bp->timer);
11686         bnxt_free_skbs(bp);
11687
11688         /* Save ring stats before shutdown */
11689         if (bp->bnapi && irq_re_init) {
11690                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
11691                 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
11692         }
11693         if (irq_re_init) {
11694                 bnxt_free_irq(bp);
11695                 bnxt_del_napi(bp);
11696         }
11697         bnxt_free_mem(bp, irq_re_init);
11698 }
11699
11700 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11701 {
11702         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11703                 /* If we get here, it means a firmware reset is in progress
11704                  * while we are trying to close.  We can safely proceed with
11705                  * the close because we are holding rtnl_lock().  Some firmware
11706                  * messages may fail as we proceed to close.  We set the
11707                  * ABORT_ERR flag here so that the FW reset thread will later
11708                  * abort when it gets the rtnl_lock() and sees the flag.
11709                  */
11710                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
11711                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11712         }
11713
11714 #ifdef CONFIG_BNXT_SRIOV
11715         if (bp->sriov_cfg) {
11716                 int rc;
11717
11718                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
11719                                                       !bp->sriov_cfg,
11720                                                       BNXT_SRIOV_CFG_WAIT_TMO);
11721                 if (!rc)
11722                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
11723                 else if (rc < 0)
11724                         netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
11725         }
11726 #endif
11727         __bnxt_close_nic(bp, irq_re_init, link_re_init);
11728 }
11729
11730 static int bnxt_close(struct net_device *dev)
11731 {
11732         struct bnxt *bp = netdev_priv(dev);
11733
11734         bnxt_close_nic(bp, true, true);
11735         bnxt_hwrm_shutdown_link(bp);
11736         bnxt_hwrm_if_change(bp, false);
11737         return 0;
11738 }
11739
11740 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
11741                                    u16 *val)
11742 {
11743         struct hwrm_port_phy_mdio_read_output *resp;
11744         struct hwrm_port_phy_mdio_read_input *req;
11745         int rc;
11746
11747         if (bp->hwrm_spec_code < 0x10a00)
11748                 return -EOPNOTSUPP;
11749
11750         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
11751         if (rc)
11752                 return rc;
11753
11754         req->port_id = cpu_to_le16(bp->pf.port_id);
11755         req->phy_addr = phy_addr;
11756         req->reg_addr = cpu_to_le16(reg & 0x1f);
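              /* For Clause 45, the port and device addresses are packed into
               * phy_addr by mdio_phy_id_c45(); unpack them and use the full
               * 16-bit register address.
               */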
11757         if (mdio_phy_id_is_c45(phy_addr)) {
11758                 req->cl45_mdio = 1;
11759                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
11760                 req->dev_addr = mdio_phy_id_devad(phy_addr);
11761                 req->reg_addr = cpu_to_le16(reg);
11762         }
11763
11764         resp = hwrm_req_hold(bp, req);
11765         rc = hwrm_req_send(bp, req);
11766         if (!rc)
11767                 *val = le16_to_cpu(resp->reg_data);
11768         hwrm_req_drop(bp, req);
11769         return rc;
11770 }
11771
11772 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
11773                                     u16 val)
11774 {
11775         struct hwrm_port_phy_mdio_write_input *req;
11776         int rc;
11777
11778         if (bp->hwrm_spec_code < 0x10a00)
11779                 return -EOPNOTSUPP;
11780
11781         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
11782         if (rc)
11783                 return rc;
11784
11785         req->port_id = cpu_to_le16(bp->pf.port_id);
11786         req->phy_addr = phy_addr;
11787         req->reg_addr = cpu_to_le16(reg & 0x1f);
11788         if (mdio_phy_id_is_c45(phy_addr)) {
11789                 req->cl45_mdio = 1;
11790                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
11791                 req->dev_addr = mdio_phy_id_devad(phy_addr);
11792                 req->reg_addr = cpu_to_le16(reg);
11793         }
11794         req->reg_data = cpu_to_le16(val);
11795
11796         return hwrm_req_send(bp, req);
11797 }
11798
11799 /* rtnl_lock held */
11800 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11801 {
11802         struct mii_ioctl_data *mdio = if_mii(ifr);
11803         struct bnxt *bp = netdev_priv(dev);
11804         int rc;
11805
11806         switch (cmd) {
11807         case SIOCGMIIPHY:
11808                 mdio->phy_id = bp->link_info.phy_addr;
11809
11810                 fallthrough;
11811         case SIOCGMIIREG: {
11812                 u16 mii_regval = 0;
11813
11814                 if (!netif_running(dev))
11815                         return -EAGAIN;
11816
11817                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
11818                                              &mii_regval);
11819                 mdio->val_out = mii_regval;
11820                 return rc;
11821         }
11822
11823         case SIOCSMIIREG:
11824                 if (!netif_running(dev))
11825                         return -EAGAIN;
11826
11827                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
11828                                                 mdio->val_in);
11829
11830         case SIOCSHWTSTAMP:
11831                 return bnxt_hwtstamp_set(dev, ifr);
11832
11833         case SIOCGHWTSTAMP:
11834                 return bnxt_hwtstamp_get(dev, ifr);
11835
11836         default:
11837                 /* do nothing */
11838                 break;
11839         }
11840         return -EOPNOTSUPP;
11841 }
11842
11843 static void bnxt_get_ring_stats(struct bnxt *bp,
11844                                 struct rtnl_link_stats64 *stats)
11845 {
11846         int i;
11847
11848         for (i = 0; i < bp->cp_nr_rings; i++) {
11849                 struct bnxt_napi *bnapi = bp->bnapi[i];
11850                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11851                 u64 *sw = cpr->stats.sw_stats;
11852
11853                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
11854                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
11855                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
11856
11857                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
11858                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
11859                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
11860
11861                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
11862                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
11863                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
11864
11865                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
11866                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
11867                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
11868
11869                 stats->rx_missed_errors +=
11870                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
11871
11872                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
11873
11874                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
11875
11876                 stats->rx_dropped +=
11877                         cpr->sw_stats.rx.rx_netpoll_discards +
11878                         cpr->sw_stats.rx.rx_oom_discards;
11879         }
11880 }
11881
11882 static void bnxt_add_prev_stats(struct bnxt *bp,
11883                                 struct rtnl_link_stats64 *stats)
11884 {
11885         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
11886
11887         stats->rx_packets += prev_stats->rx_packets;
11888         stats->tx_packets += prev_stats->tx_packets;
11889         stats->rx_bytes += prev_stats->rx_bytes;
11890         stats->tx_bytes += prev_stats->tx_bytes;
11891         stats->rx_missed_errors += prev_stats->rx_missed_errors;
11892         stats->multicast += prev_stats->multicast;
11893         stats->rx_dropped += prev_stats->rx_dropped;
11894         stats->tx_dropped += prev_stats->tx_dropped;
11895 }
11896
11897 static void
11898 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
11899 {
11900         struct bnxt *bp = netdev_priv(dev);
11901
11902         set_bit(BNXT_STATE_READ_STATS, &bp->state);
11903         /* Make sure bnxt_close_nic() sees that we are reading stats before
11904          * we check the BNXT_STATE_OPEN flag.
11905          */
11906         smp_mb__after_atomic();
11907         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11908                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
11909                 *stats = bp->net_stats_prev;
11910                 return;
11911         }
11912
11913         bnxt_get_ring_stats(bp, stats);
11914         bnxt_add_prev_stats(bp, stats);
11915
11916         if (bp->flags & BNXT_FLAG_PORT_STATS) {
11917                 u64 *rx = bp->port_stats.sw_stats;
11918                 u64 *tx = bp->port_stats.sw_stats +
11919                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
11920
11921                 stats->rx_crc_errors =
11922                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
11923                 stats->rx_frame_errors =
11924                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
11925                 stats->rx_length_errors =
11926                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
11927                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
11928                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
11929                 stats->rx_errors =
11930                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
11931                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
11932                 stats->collisions =
11933                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
11934                 stats->tx_fifo_errors =
11935                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
11936                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
11937         }
11938         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
11939 }
11940
11941 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
11942                                         struct bnxt_total_ring_err_stats *stats,
11943                                         struct bnxt_cp_ring_info *cpr)
11944 {
11945         struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
11946         u64 *hw_stats = cpr->stats.sw_stats;
11947
11948         stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
11949         stats->rx_total_resets += sw_stats->rx.rx_resets;
11950         stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
11951         stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
11952         stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
11953         stats->rx_total_ring_discards +=
11954                 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
11955         stats->tx_total_resets += sw_stats->tx.tx_resets;
11956         stats->tx_total_ring_discards +=
11957                 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
11958         stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
11959 }
11960
11961 void bnxt_get_ring_err_stats(struct bnxt *bp,
11962                              struct bnxt_total_ring_err_stats *stats)
11963 {
11964         int i;
11965
11966         for (i = 0; i < bp->cp_nr_rings; i++)
11967                 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
11968 }
11969
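      /* Sync the netdev multicast list into the default vnic's MC list.
       * If there are more than BNXT_MAX_MC_ADDRS addresses, fall back to
       * ALL_MCAST instead.  Returns true if the rx mask needs updating.
       */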
11970 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
11971 {
11972         struct net_device *dev = bp->dev;
11973         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11974         struct netdev_hw_addr *ha;
11975         u8 *haddr;
11976         int mc_count = 0;
11977         bool update = false;
11978         int off = 0;
11979
11980         netdev_for_each_mc_addr(ha, dev) {
11981                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
11982                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11983                         vnic->mc_list_count = 0;
11984                         return false;
11985                 }
11986                 haddr = ha->addr;
11987                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
11988                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
11989                         update = true;
11990                 }
11991                 off += ETH_ALEN;
11992                 mc_count++;
11993         }
11994         if (mc_count)
11995                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11996
11997         if (mc_count != vnic->mc_list_count) {
11998                 vnic->mc_list_count = mc_count;
11999                 update = true;
12000         }
12001         return update;
12002 }
12003
12004 static bool bnxt_uc_list_updated(struct bnxt *bp)
12005 {
12006         struct net_device *dev = bp->dev;
12007         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12008         struct netdev_hw_addr *ha;
12009         int off = 0;
12010
12011         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
12012                 return true;
12013
12014         netdev_for_each_uc_addr(ha, dev) {
12015                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
12016                         return true;
12017
12018                 off += ETH_ALEN;
12019         }
12020         return false;
12021 }
12022
12023 static void bnxt_set_rx_mode(struct net_device *dev)
12024 {
12025         struct bnxt *bp = netdev_priv(dev);
12026         struct bnxt_vnic_info *vnic;
12027         bool mc_update = false;
12028         bool uc_update;
12029         u32 mask;
12030
12031         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
12032                 return;
12033
12034         vnic = &bp->vnic_info[0];
12035         mask = vnic->rx_mask;
12036         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
12037                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
12038                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
12039                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
12040
12041         if (dev->flags & IFF_PROMISC)
12042                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12043
12044         uc_update = bnxt_uc_list_updated(bp);
12045
12046         if (dev->flags & IFF_BROADCAST)
12047                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
12048         if (dev->flags & IFF_ALLMULTI) {
12049                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12050                 vnic->mc_list_count = 0;
12051         } else if (dev->flags & IFF_MULTICAST) {
12052                 mc_update = bnxt_mc_list_updated(bp, &mask);
12053         }
12054
12055         if (mask != vnic->rx_mask || uc_update || mc_update) {
12056                 vnic->rx_mask = mask;
12057
12058                 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12059         }
12060 }
12061
12062 static int bnxt_cfg_rx_mode(struct bnxt *bp)
12063 {
12064         struct net_device *dev = bp->dev;
12065         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12066         struct netdev_hw_addr *ha;
12067         int i, off = 0, rc;
12068         bool uc_update;
12069
12070         netif_addr_lock_bh(dev);
12071         uc_update = bnxt_uc_list_updated(bp);
12072         netif_addr_unlock_bh(dev);
12073
12074         if (!uc_update)
12075                 goto skip_uc;
12076
12077         for (i = 1; i < vnic->uc_filter_count; i++) {
12078                 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
12079
12080                 bnxt_hwrm_l2_filter_free(bp, fltr);
12081                 bnxt_del_l2_filter(bp, fltr);
12082         }
12083
12084         vnic->uc_filter_count = 1;
12085
12086         netif_addr_lock_bh(dev);
12087         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
12088                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12089         } else {
12090                 netdev_for_each_uc_addr(ha, dev) {
12091                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
12092                         off += ETH_ALEN;
12093                         vnic->uc_filter_count++;
12094                 }
12095         }
12096         netif_addr_unlock_bh(dev);
12097
12098         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
12099                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12100                 if (rc) {
12101                         if (BNXT_VF(bp) && rc == -ENODEV) {
12102                                 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12103                                         netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
12104                                 else
12105                                         netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
12106                                 rc = 0;
12107                         } else {
12108                                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12109                         }
12110                         vnic->uc_filter_count = i;
12111                         return rc;
12112                 }
12113         }
12114         if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12115                 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
12116
12117 skip_uc:
12118         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
12119             !bnxt_promisc_ok(bp))
12120                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12121         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12122         if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
12123                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12124                             rc);
12125                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12126                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12127                 vnic->mc_list_count = 0;
12128                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12129         }
12130         if (rc)
12131                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12132                            rc);
12133
12134         return rc;
12135 }
12136
12137 static bool bnxt_can_reserve_rings(struct bnxt *bp)
12138 {
12139 #ifdef CONFIG_BNXT_SRIOV
12140         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
12141                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12142
12143                 /* If no minimum rings were provisioned by the PF, don't
12144                  * reserve rings by default when the device is down.
12145                  */
12146                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
12147                         return true;
12148
12149                 if (!netif_running(bp->dev))
12150                         return false;
12151         }
12152 #endif
12153         return true;
12154 }
12155
12156 /* True if the chip and firmware support RFS */
12157 static bool bnxt_rfs_supported(struct bnxt *bp)
12158 {
12159         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
12160                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
12161                         return true;
12162                 return false;
12163         }
12164         /* 212 firmware is broken for aRFS */
12165         if (BNXT_FW_MAJ(bp) == 212)
12166                 return false;
12167         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
12168                 return true;
12169         if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12170                 return true;
12171         return false;
12172 }
12173
12174 /* True if runtime conditions support RFS */
12175 static bool bnxt_rfs_capable(struct bnxt *bp)
12176 {
12177         int vnics, max_vnics, max_rss_ctxs;
12178
12179         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
12180                 return bnxt_rfs_supported(bp);
12181         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
12182                 return false;
12183
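              /* NTUPLE filtering needs the default VNIC plus one VNIC per
               * RX ring.
               */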
12184         vnics = 1 + bp->rx_nr_rings;
12185         max_vnics = bnxt_get_max_func_vnics(bp);
12186         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
12187
12188         /* RSS contexts are not a limiting factor */
12189         if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12190                 max_rss_ctxs = max_vnics;
12191         if (vnics > max_vnics || vnics > max_rss_ctxs) {
12192                 if (bp->rx_nr_rings > 1)
12193                         netdev_warn(bp->dev,
12194                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
12195                                     min(max_rss_ctxs - 1, max_vnics - 1));
12196                 return false;
12197         }
12198
12199         if (!BNXT_NEW_RM(bp))
12200                 return true;
12201
12202         if (vnics == bp->hw_resc.resv_vnics)
12203                 return true;
12204
12205         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
12206         if (vnics <= bp->hw_resc.resv_vnics)
12207                 return true;
12208
12209         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
12210         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
12211         return false;
12212 }
12213
12214 static netdev_features_t bnxt_fix_features(struct net_device *dev,
12215                                            netdev_features_t features)
12216 {
12217         struct bnxt *bp = netdev_priv(dev);
12218         netdev_features_t vlan_features;
12219
12220         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
12221                 features &= ~NETIF_F_NTUPLE;
12222
12223         if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
12224                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12225
12226         if (!(features & NETIF_F_GRO))
12227                 features &= ~NETIF_F_GRO_HW;
12228
12229         if (features & NETIF_F_GRO_HW)
12230                 features &= ~NETIF_F_LRO;
12231
12232         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
12233          * turned on or off together.
12234          */
12235         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
12236         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
12237                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12238                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12239                 else if (vlan_features)
12240                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12241         }
12242 #ifdef CONFIG_BNXT_SRIOV
12243         if (BNXT_VF(bp) && bp->vf.vlan)
12244                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12245 #endif
12246         return features;
12247 }
12248
12249 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
12250 {
12251         struct bnxt *bp = netdev_priv(dev);
12252         u32 flags = bp->flags;
12253         u32 changes;
12254         int rc = 0;
12255         bool re_init = false;
12256         bool update_tpa = false;
12257
12258         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
12259         if (features & NETIF_F_GRO_HW)
12260                 flags |= BNXT_FLAG_GRO;
12261         else if (features & NETIF_F_LRO)
12262                 flags |= BNXT_FLAG_LRO;
12263
12264         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
12265                 flags &= ~BNXT_FLAG_TPA;
12266
12267         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12268                 flags |= BNXT_FLAG_STRIP_VLAN;
12269
12270         if (features & NETIF_F_NTUPLE)
12271                 flags |= BNXT_FLAG_RFS;
12272
12273         changes = flags ^ bp->flags;
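              /* Switching TPA mode between GRO and LRO can be done on the
               * fly on older chips; enabling or disabling TPA entirely, or
               * any TPA change on P5+ chips, needs a full close/open.
               */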
12274         if (changes & BNXT_FLAG_TPA) {
12275                 update_tpa = true;
12276                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
12277                     (flags & BNXT_FLAG_TPA) == 0 ||
12278                     (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12279                         re_init = true;
12280         }
12281
12282         if (changes & ~BNXT_FLAG_TPA)
12283                 re_init = true;
12284
12285         if (flags != bp->flags) {
12286                 u32 old_flags = bp->flags;
12287
12288                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12289                         bp->flags = flags;
12290                         if (update_tpa)
12291                                 bnxt_set_ring_params(bp);
12292                         return rc;
12293                 }
12294
12295                 if (re_init) {
12296                         bnxt_close_nic(bp, false, false);
12297                         bp->flags = flags;
12298                         if (update_tpa)
12299                                 bnxt_set_ring_params(bp);
12300
12301                         return bnxt_open_nic(bp, false, false);
12302                 }
12303                 if (update_tpa) {
12304                         bp->flags = flags;
12305                         rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
12308                         if (rc)
12309                                 bp->flags = old_flags;
12310                 }
12311         }
12312         return rc;
12313 }
12314
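/* Walk the IPv6 extension header chain starting at nw_off and return true
 * if the headers are within the limits the hardware can handle (see the
 * comment below).  For encapsulated packets, *nextp is set to the final
 * nexthdr so the caller can check the inner protocol.
 */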
12315 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
12316                               u8 **nextp)
12317 {
12318         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
12319         struct hop_jumbo_hdr *jhdr;
12320         int hdr_count = 0;
12321         u8 *nexthdr;
12322         int start;
12323
12324         /* Check that there are at most 3 IPv6 extension headers, no
12325          * fragment header, and each is <= 64 bytes.
12326          */
12327         start = nw_off + sizeof(*ip6h);
12328         nexthdr = &ip6h->nexthdr;
12329         while (ipv6_ext_hdr(*nexthdr)) {
12330                 struct ipv6_opt_hdr *hp;
12331                 int hdrlen;
12332
12333                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
12334                     *nexthdr == NEXTHDR_FRAGMENT)
12335                         return false;
12336                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
12337                                           skb_headlen(skb), NULL);
12338                 if (!hp)
12339                         return false;
12340                 if (*nexthdr == NEXTHDR_AUTH)
12341                         hdrlen = ipv6_authlen(hp);
12342                 else
12343                         hdrlen = ipv6_optlen(hp);
12344
12345                 if (hdrlen > 64)
12346                         return false;
12347
12348                 /* The ext header may be a hop-by-hop header inserted for
12349                  * big TCP purposes.  It will be removed before the packet is
12350                  * sent from the NIC, so do not count it.
12351                  */
12352                 if (*nexthdr == NEXTHDR_HOP) {
12353                         if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
12354                                 goto increment_hdr;
12355
12356                         jhdr = (struct hop_jumbo_hdr *)hp;
12357                         if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
12358                             jhdr->nexthdr != IPPROTO_TCP)
12359                                 goto increment_hdr;
12360
12361                         goto next_hdr;
12362                 }
12363 increment_hdr:
12364                 hdr_count++;
12365 next_hdr:
12366                 nexthdr = &hp->nexthdr;
12367                 start += hdrlen;
12368         }
12369         if (nextp) {
12370                 /* Caller will check inner protocol */
12371                 if (skb->encapsulation) {
12372                         *nextp = nexthdr;
12373                         return true;
12374                 }
12375                 *nextp = NULL;
12376         }
12377         /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
12378         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
12379 }
12380
12381 /* For UDP, we can only handle 1 VXLAN, 1 VXLAN-GPE and 1 Geneve port. */
12382 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
12383 {
12384         struct udphdr *uh = udp_hdr(skb);
12385         __be16 udp_port = uh->dest;
12386
12387         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
12388             udp_port != bp->vxlan_gpe_port)
12389                 return false;
12390         if (skb->inner_protocol == htons(ETH_P_TEB)) {
12391                 struct ethhdr *eh = inner_eth_hdr(skb);
12392
12393                 switch (eh->h_proto) {
12394                 case htons(ETH_P_IP):
12395                         return true;
12396                 case htons(ETH_P_IPV6):
12397                         return bnxt_exthdr_check(bp, skb,
12398                                                  skb_inner_network_offset(skb),
12399                                                  NULL);
12400                 }
12401         } else if (skb->inner_protocol == htons(ETH_P_IP)) {
12402                 return true;
12403         } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
12404                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12405                                          NULL);
12406         }
12407         return false;
12408 }
12409
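/* Return true if the tunnel indicated by l4_proto (UDP tunnels, IP-in-IP,
 * GRE, or IPv6-in-IP) is one the hardware can offload for this skb.
 */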
12410 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
12411 {
12412         switch (l4_proto) {
12413         case IPPROTO_UDP:
12414                 return bnxt_udp_tunl_check(bp, skb);
12415         case IPPROTO_IPIP:
12416                 return true;
12417         case IPPROTO_GRE: {
12418                 switch (skb->inner_protocol) {
12419                 default:
12420                         return false;
12421                 case htons(ETH_P_IP):
12422                         return true;
12423                 case htons(ETH_P_IPV6):
12424                         fallthrough;
12425                 }
12426         }
12427         case IPPROTO_IPV6:
12428                 /* Check ext headers of inner ipv6 */
12429                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12430                                          NULL);
12431         }
12432         return false;
12433 }
12434
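/* .ndo_features_check handler: for tunneled packets or IPv6 packets with
 * extension headers that the hardware cannot parse, clear the checksum
 * and GSO bits so the stack falls back to software.
 */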
12435 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
12436                                              struct net_device *dev,
12437                                              netdev_features_t features)
12438 {
12439         struct bnxt *bp = netdev_priv(dev);
12440         u8 *l4_proto;
12441
12442         features = vlan_features_check(skb, features);
12443         switch (vlan_get_protocol(skb)) {
12444         case htons(ETH_P_IP):
12445                 if (!skb->encapsulation)
12446                         return features;
12447                 l4_proto = &ip_hdr(skb)->protocol;
12448                 if (bnxt_tunl_check(bp, skb, *l4_proto))
12449                         return features;
12450                 break;
12451         case htons(ETH_P_IPV6):
12452                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
12453                                        &l4_proto))
12454                         break;
12455                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
12456                         return features;
12457                 break;
12458         }
12459         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12460 }
12461
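/* Read num_words 32-bit words starting at register offset reg_off using
 * the HWRM_DBG_READ_DIRECT firmware command.  Firmware DMAs the values
 * into a host buffer, which is then copied to reg_buf in CPU order.
 */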
12462 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
12463                          u32 *reg_buf)
12464 {
12465         struct hwrm_dbg_read_direct_output *resp;
12466         struct hwrm_dbg_read_direct_input *req;
12467         __le32 *dbg_reg_buf;
12468         dma_addr_t mapping;
12469         int rc, i;
12470
12471         rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
12472         if (rc)
12473                 return rc;
12474
12475         dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
12476                                          &mapping);
12477         if (!dbg_reg_buf) {
12478                 rc = -ENOMEM;
12479                 goto dbg_rd_reg_exit;
12480         }
12481
12482         req->host_dest_addr = cpu_to_le64(mapping);
12483
12484         resp = hwrm_req_hold(bp, req);
12485         req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
12486         req->read_len32 = cpu_to_le32(num_words);
12487
12488         rc = hwrm_req_send(bp, req);
12489         if (rc || resp->error_code) {
12490                 rc = -EIO;
12491                 goto dbg_rd_reg_exit;
12492         }
12493         for (i = 0; i < num_words; i++)
12494                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
12495
12496 dbg_rd_reg_exit:
12497         hwrm_req_drop(bp, req);
12498         return rc;
12499 }
12500
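/* Query firmware for the current producer and consumer indices of a ring,
 * used below when diagnosing missed IRQs.
 */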
12501 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
12502                                        u32 ring_id, u32 *prod, u32 *cons)
12503 {
12504         struct hwrm_dbg_ring_info_get_output *resp;
12505         struct hwrm_dbg_ring_info_get_input *req;
12506         int rc;
12507
12508         rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
12509         if (rc)
12510                 return rc;
12511
12512         req->ring_type = ring_type;
12513         req->fw_ring_id = cpu_to_le32(ring_id);
12514         resp = hwrm_req_hold(bp, req);
12515         rc = hwrm_req_send(bp, req);
12516         if (!rc) {
12517                 *prod = le32_to_cpu(resp->producer_index);
12518                 *cons = le32_to_cpu(resp->consumer_index);
12519         }
12520         hwrm_req_drop(bp, req);
12521         return rc;
12522 }
12523
12524 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
12525 {
12526         struct bnxt_tx_ring_info *txr;
12527         int i = bnapi->index, j;
12528
12529         bnxt_for_each_napi_tx(j, bnapi, txr)
12530                 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
12531                             i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
12532                             txr->tx_cons);
12533 }
12534
12535 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
12536 {
12537         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
12538         int i = bnapi->index;
12539
12540         if (!rxr)
12541                 return;
12542
12543         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
12544                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
12545                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
12546                     rxr->rx_sw_agg_prod);
12547 }
12548
12549 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
12550 {
12551         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12552         int i = bnapi->index;
12553
12554         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
12555                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
12556 }
12557
12558 static void bnxt_dbg_dump_states(struct bnxt *bp)
12559 {
12560         int i;
12561         struct bnxt_napi *bnapi;
12562
12563         for (i = 0; i < bp->cp_nr_rings; i++) {
12564                 bnapi = bp->bnapi[i];
12565                 if (netif_msg_drv(bp)) {
12566                         bnxt_dump_tx_sw_state(bnapi);
12567                         bnxt_dump_rx_sw_state(bnapi);
12568                         bnxt_dump_cp_sw_state(bnapi);
12569                 }
12570         }
12571 }
12572
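/* Ask firmware to reset the RX ring group for ring_nr.  The request is
 * sent silently because older firmware may not support it, in which case
 * the caller falls back to a full reset.
 */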
12573 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
12574 {
12575         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
12576         struct hwrm_ring_reset_input *req;
12577         struct bnxt_napi *bnapi = rxr->bnapi;
12578         struct bnxt_cp_ring_info *cpr;
12579         u16 cp_ring_id;
12580         int rc;
12581
12582         rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
12583         if (rc)
12584                 return rc;
12585
12586         cpr = &bnapi->cp_ring;
12587         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
12588         req->cmpl_ring = cpu_to_le16(cp_ring_id);
12589         req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
12590         req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
12591         return hwrm_req_send_silent(bp, req);
12592 }
12593
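/* Close and reopen the NIC to recover from an error.  A silent reset
 * skips the state dump and IRQ re-initialization; a full reset also stops
 * and restarts the ULP driver.
 */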
12594 static void bnxt_reset_task(struct bnxt *bp, bool silent)
12595 {
12596         if (!silent)
12597                 bnxt_dbg_dump_states(bp);
12598         if (netif_running(bp->dev)) {
12599                 int rc;
12600
12601                 if (silent) {
12602                         bnxt_close_nic(bp, false, false);
12603                         bnxt_open_nic(bp, false, false);
12604                 } else {
12605                         bnxt_ulp_stop(bp);
12606                         bnxt_close_nic(bp, true, false);
12607                         rc = bnxt_open_nic(bp, true, false);
12608                         bnxt_ulp_start(bp, rc);
12609                 }
12610         }
12611 }
12612
12613 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
12614 {
12615         struct bnxt *bp = netdev_priv(dev);
12616
12617         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
12618         bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
12619 }
12620
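/* Periodic firmware health poll: if the heartbeat register stops
 * advancing, or the reset counter changes while the device is still
 * present, schedule a firmware exception event.
 */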
12621 static void bnxt_fw_health_check(struct bnxt *bp)
12622 {
12623         struct bnxt_fw_health *fw_health = bp->fw_health;
12624         struct pci_dev *pdev = bp->pdev;
12625         u32 val;
12626
12627         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12628                 return;
12629
12630         /* Make sure it is enabled before checking the tmr_counter. */
12631         smp_rmb();
12632         if (fw_health->tmr_counter) {
12633                 fw_health->tmr_counter--;
12634                 return;
12635         }
12636
12637         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
12638         if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
12639                 fw_health->arrests++;
12640                 goto fw_reset;
12641         }
12642
12643         fw_health->last_fw_heartbeat = val;
12644
12645         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12646         if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
12647                 fw_health->discoveries++;
12648                 goto fw_reset;
12649         }
12650
12651         fw_health->tmr_counter = fw_health->tmr_multiplier;
12652         return;
12653
12654 fw_reset:
12655         bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
12656 }
12657
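/* Per-device periodic timer: schedules slow-path work such as stats
 * updates, flow stats, NTUPLE filter processing, PHY setting retries and
 * firmware health checks, then re-arms itself.
 */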
12658 static void bnxt_timer(struct timer_list *t)
12659 {
12660         struct bnxt *bp = from_timer(bp, t, timer);
12661         struct net_device *dev = bp->dev;
12662
12663         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
12664                 return;
12665
12666         if (atomic_read(&bp->intr_sem) != 0)
12667                 goto bnxt_restart_timer;
12668
12669         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
12670                 bnxt_fw_health_check(bp);
12671
12672         if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
12673                 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
12674
12675         if (bnxt_tc_flower_enabled(bp))
12676                 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
12677
12678 #ifdef CONFIG_RFS_ACCEL
12679         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
12680                 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
12681 #endif /* CONFIG_RFS_ACCEL */
12682
12683         if (bp->link_info.phy_retry) {
12684                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
12685                         bp->link_info.phy_retry = false;
12686                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
12687                 } else {
12688                         bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
12689                 }
12690         }
12691
12692         if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12693                 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12694
12695         if (BNXT_CHIP_P5(bp) && !bp->chip_rev && netif_carrier_ok(dev))
12696                 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
12697
12698 bnxt_restart_timer:
12699         mod_timer(&bp->timer, jiffies + bp->current_interval);
12700 }
12701
12702 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
12703 {
12704         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
12705          * set.  If the device is being closed, bnxt_close() may be holding
12706          * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
12707          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
12708          */
12709         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12710         rtnl_lock();
12711 }
12712
12713 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
12714 {
12715         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12716         rtnl_unlock();
12717 }
12718
12719 /* Only called from bnxt_sp_task() */
12720 static void bnxt_reset(struct bnxt *bp, bool silent)
12721 {
12722         bnxt_rtnl_lock_sp(bp);
12723         if (test_bit(BNXT_STATE_OPEN, &bp->state))
12724                 bnxt_reset_task(bp, silent);
12725         bnxt_rtnl_unlock_sp(bp);
12726 }
12727
12728 /* Only called from bnxt_sp_task() */
12729 static void bnxt_rx_ring_reset(struct bnxt *bp)
12730 {
12731         int i;
12732
12733         bnxt_rtnl_lock_sp(bp);
12734         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12735                 bnxt_rtnl_unlock_sp(bp);
12736                 return;
12737         }
12738         /* Disable and flush TPA before resetting the RX ring */
12739         if (bp->flags & BNXT_FLAG_TPA)
12740                 bnxt_set_tpa(bp, false);
12741         for (i = 0; i < bp->rx_nr_rings; i++) {
12742                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
12743                 struct bnxt_cp_ring_info *cpr;
12744                 int rc;
12745
12746                 if (!rxr->bnapi->in_reset)
12747                         continue;
12748
12749                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
12750                 if (rc) {
12751                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
12752                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
12753                         else
12754                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
12755                                             rc);
12756                         bnxt_reset_task(bp, true);
12757                         break;
12758                 }
12759                 bnxt_free_one_rx_ring_skbs(bp, i);
12760                 rxr->rx_prod = 0;
12761                 rxr->rx_agg_prod = 0;
12762                 rxr->rx_sw_agg_prod = 0;
12763                 rxr->rx_next_cons = 0;
12764                 rxr->bnapi->in_reset = false;
12765                 bnxt_alloc_one_rx_ring(bp, i);
12766                 cpr = &rxr->bnapi->cp_ring;
12767                 cpr->sw_stats.rx.rx_resets++;
12768                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12769                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
12770                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
12771         }
12772         if (bp->flags & BNXT_FLAG_TPA)
12773                 bnxt_set_tpa(bp, true);
12774         bnxt_rtnl_unlock_sp(bp);
12775 }
12776
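/* Quiesce and close the device in preparation for a firmware reset, then
 * unregister from firmware and release context memory.
 */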
12777 static void bnxt_fw_reset_close(struct bnxt *bp)
12778 {
12779         bnxt_ulp_stop(bp);
12780         /* When firmware is in fatal state, quiesce device and disable
12781          * bus master to prevent any potential bad DMAs before freeing
12782          * kernel memory.
12783          */
12784         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
12785                 u16 val = 0;
12786
12787                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12788                 if (val == 0xffff)
12789                         bp->fw_reset_min_dsecs = 0;
12790                 bnxt_tx_disable(bp);
12791                 bnxt_disable_napi(bp);
12792                 bnxt_disable_int_sync(bp);
12793                 bnxt_free_irq(bp);
12794                 bnxt_clear_int_mode(bp);
12795                 pci_disable_device(bp->pdev);
12796         }
12797         __bnxt_close_nic(bp, true, false);
12798         bnxt_vf_reps_free(bp);
12799         bnxt_clear_int_mode(bp);
12800         bnxt_hwrm_func_drv_unrgtr(bp);
12801         if (pci_is_enabled(bp->pdev))
12802                 pci_disable_device(bp->pdev);
12803         bnxt_free_ctx_mem(bp);
12804 }
12805
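/* Return true if firmware has completed a reset (reset counter changed)
 * and is healthy again (heartbeat advancing).
 */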
12806 static bool is_bnxt_fw_ok(struct bnxt *bp)
12807 {
12808         struct bnxt_fw_health *fw_health = bp->fw_health;
12809         bool no_heartbeat = false, has_reset = false;
12810         u32 val;
12811
12812         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
12813         if (val == fw_health->last_fw_heartbeat)
12814                 no_heartbeat = true;
12815
12816         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12817         if (val != fw_health->last_fw_reset_cnt)
12818                 has_reset = true;
12819
12820         if (!no_heartbeat && has_reset)
12821                 return true;
12822
12823         return false;
12824 }
12825
12826 /* rtnl_lock is acquired before calling this function */
12827 static void bnxt_force_fw_reset(struct bnxt *bp)
12828 {
12829         struct bnxt_fw_health *fw_health = bp->fw_health;
12830         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
12831         u32 wait_dsecs;
12832
12833         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
12834             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12835                 return;
12836
12837         if (ptp) {
12838                 spin_lock_bh(&ptp->ptp_lock);
12839                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12840                 spin_unlock_bh(&ptp->ptp_lock);
12841         } else {
12842                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12843         }
12844         bnxt_fw_reset_close(bp);
12845         wait_dsecs = fw_health->master_func_wait_dsecs;
12846         if (fw_health->primary) {
12847                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
12848                         wait_dsecs = 0;
12849                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12850         } else {
12851                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
12852                 wait_dsecs = fw_health->normal_func_wait_dsecs;
12853                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12854         }
12855
12856         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
12857         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
12858         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12859 }
12860
12861 void bnxt_fw_exception(struct bnxt *bp)
12862 {
12863         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
12864         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12865         bnxt_rtnl_lock_sp(bp);
12866         bnxt_force_fw_reset(bp);
12867         bnxt_rtnl_unlock_sp(bp);
12868 }
12869
12870 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
12871  * < 0 on error.
12872  */
12873 static int bnxt_get_registered_vfs(struct bnxt *bp)
12874 {
12875 #ifdef CONFIG_BNXT_SRIOV
12876         int rc;
12877
12878         if (!BNXT_PF(bp))
12879                 return 0;
12880
12881         rc = bnxt_hwrm_func_qcfg(bp);
12882         if (rc) {
12883                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
12884                 return rc;
12885         }
12886         if (bp->pf.registered_vfs)
12887                 return bp->pf.registered_vfs;
12888         if (bp->sriov_cfg)
12889                 return 1;
12890 #endif
12891         return 0;
12892 }
12893
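/* Initiate a firmware reset.  If VFs are still registered, poll for them
 * to unregister first; otherwise close the device and kick off the
 * firmware reset state machine.
 */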
12894 void bnxt_fw_reset(struct bnxt *bp)
12895 {
12896         bnxt_rtnl_lock_sp(bp);
12897         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
12898             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12899                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
12900                 int n = 0, tmo;
12901
12902                 if (ptp) {
12903                         spin_lock_bh(&ptp->ptp_lock);
12904                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12905                         spin_unlock_bh(&ptp->ptp_lock);
12906                 } else {
12907                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12908                 }
12909                 if (bp->pf.active_vfs &&
12910                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
12911                         n = bnxt_get_registered_vfs(bp);
12912                 if (n < 0) {
12913                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
12914                                    n);
12915                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12916                         dev_close(bp->dev);
12917                         goto fw_reset_exit;
12918                 } else if (n > 0) {
12919                         u16 vf_tmo_dsecs = n * 10;
12920
12921                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
12922                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
12923                         bp->fw_reset_state =
12924                                 BNXT_FW_RESET_STATE_POLL_VF;
12925                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12926                         goto fw_reset_exit;
12927                 }
12928                 bnxt_fw_reset_close(bp);
12929                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12930                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12931                         tmo = HZ / 10;
12932                 } else {
12933                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12934                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12935                 }
12936                 bnxt_queue_fw_reset_work(bp, tmo);
12937         }
12938 fw_reset_exit:
12939         bnxt_rtnl_unlock_sp(bp);
12940 }
12941
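/* On P5+ chips, look for completion rings that have pending work but
 * whose raw consumer index has not moved since the last check, indicating
 * a missed IRQ.  The ring state is read from firmware for diagnostics and
 * the event is counted in missed_irqs.
 */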
12942 static void bnxt_chk_missed_irq(struct bnxt *bp)
12943 {
12944         int i;
12945
12946         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12947                 return;
12948
12949         for (i = 0; i < bp->cp_nr_rings; i++) {
12950                 struct bnxt_napi *bnapi = bp->bnapi[i];
12951                 struct bnxt_cp_ring_info *cpr;
12952                 u32 fw_ring_id;
12953                 int j;
12954
12955                 if (!bnapi)
12956                         continue;
12957
12958                 cpr = &bnapi->cp_ring;
12959                 for (j = 0; j < cpr->cp_ring_count; j++) {
12960                         struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
12961                         u32 val[2];
12962
12963                         if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
12964                                 continue;
12965
12966                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
12967                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
12968                                 continue;
12969                         }
12970                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
12971                         bnxt_dbg_hwrm_ring_info_get(bp,
12972                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
12973                                 fw_ring_id, &val[0], &val[1]);
12974                         cpr->sw_stats.cmn.missed_irqs++;
12975                 }
12976         }
12977 }
12978
12979 static void bnxt_cfg_ntp_filters(struct bnxt *);
12980
12981 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
12982 {
12983         struct bnxt_link_info *link_info = &bp->link_info;
12984
12985         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
12986                 link_info->autoneg = BNXT_AUTONEG_SPEED;
12987                 if (bp->hwrm_spec_code >= 0x10201) {
12988                         if (link_info->auto_pause_setting &
12989                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
12990                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12991                 } else {
12992                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12993                 }
12994                 bnxt_set_auto_speed(link_info);
12995         } else {
12996                 bnxt_set_force_speed(link_info);
12997                 link_info->req_duplex = link_info->duplex_setting;
12998         }
12999         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
13000                 link_info->req_flow_ctrl =
13001                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
13002         else
13003                 link_info->req_flow_ctrl = link_info->force_pause_setting;
13004 }
13005
13006 static void bnxt_fw_echo_reply(struct bnxt *bp)
13007 {
13008         struct bnxt_fw_health *fw_health = bp->fw_health;
13009         struct hwrm_func_echo_response_input *req;
13010         int rc;
13011
13012         rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13013         if (rc)
13014                 return;
13015         req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
13016         req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
13017         hwrm_req_send(bp, req);
13018 }
13019
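/* Slow-path workqueue handler: services the BNXT_*_SP_EVENT bits set by
 * interrupt and timer handlers.  BNXT_STATE_IN_SP_TASK is set for the
 * duration so that bnxt_close() can synchronize against it.
 */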
13020 static void bnxt_sp_task(struct work_struct *work)
13021 {
13022         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
13023
13024         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13025         smp_mb__after_atomic();
13026         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13027                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13028                 return;
13029         }
13030
13031         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
13032                 bnxt_cfg_rx_mode(bp);
13033
13034         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
13035                 bnxt_cfg_ntp_filters(bp);
13036         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
13037                 bnxt_hwrm_exec_fwd_req(bp);
13038         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13039                 netdev_info(bp->dev, "Received PF driver unload event!\n");
13040         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
13041                 bnxt_hwrm_port_qstats(bp, 0);
13042                 bnxt_hwrm_port_qstats_ext(bp, 0);
13043                 bnxt_accumulate_all_stats(bp);
13044         }
13045
13046         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
13047                 int rc;
13048
13049                 mutex_lock(&bp->link_lock);
13050                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
13051                                        &bp->sp_event))
13052                         bnxt_hwrm_phy_qcaps(bp);
13053
13054                 rc = bnxt_update_link(bp, true);
13055                 if (rc)
13056                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13057                                    rc);
13058
13059                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
13060                                        &bp->sp_event))
13061                         bnxt_init_ethtool_link_settings(bp);
13062                 mutex_unlock(&bp->link_lock);
13063         }
13064         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
13065                 int rc;
13066
13067                 mutex_lock(&bp->link_lock);
13068                 rc = bnxt_update_phy_setting(bp);
13069                 mutex_unlock(&bp->link_lock);
13070                 if (rc) {
13071                         netdev_warn(bp->dev, "update phy settings retry failed\n");
13072                 } else {
13073                         bp->link_info.phy_retry = false;
13074                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
13075                 }
13076         }
13077         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
13078                 mutex_lock(&bp->link_lock);
13079                 bnxt_get_port_module_status(bp);
13080                 mutex_unlock(&bp->link_lock);
13081         }
13082
13083         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
13084                 bnxt_tc_flow_stats_work(bp);
13085
13086         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
13087                 bnxt_chk_missed_irq(bp);
13088
13089         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
13090                 bnxt_fw_echo_reply(bp);
13091
13092         if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13093                 bnxt_hwmon_notify_event(bp);
13094
13095         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
13096          * must be the last functions called before exiting.
13097          */
13098         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
13099                 bnxt_reset(bp, false);
13100
13101         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
13102                 bnxt_reset(bp, true);
13103
13104         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
13105                 bnxt_rx_ring_reset(bp);
13106
13107         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13108                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
13109                     test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
13110                         bnxt_devlink_health_fw_report(bp);
13111                 else
13112                         bnxt_fw_reset(bp);
13113         }
13114
13115         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13116                 if (!is_bnxt_fw_ok(bp))
13117                         bnxt_devlink_health_fw_report(bp);
13118         }
13119
13120         smp_mb__before_atomic();
13121         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13122 }
13123
13124 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13125                                 int *max_cp);
13126
13127 /* Under rtnl_lock */
13128 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
13129                      int tx_xdp)
13130 {
13131         int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
13132         int tx_rings_needed, stats;
13133         int rx_rings = rx;
13134         int cp, vnics;
13135
13136         if (tcs)
13137                 tx_sets = tcs;
13138
13139         _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13140
13141         if (max_rx < rx_rings)
13142                 return -ENOMEM;
13143
13144         if (bp->flags & BNXT_FLAG_AGG_RINGS)
13145                 rx_rings <<= 1;
13146
13147         tx_rings_needed = tx * tx_sets + tx_xdp;
13148         if (max_tx < tx_rings_needed)
13149                 return -ENOMEM;
13150
13151         vnics = 1;
13152         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
13153             BNXT_FLAG_RFS)
13154                 vnics += rx;
13155
13156         tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
13157         cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
13158         if (max_cp < cp)
13159                 return -ENOMEM;
13160         stats = cp;
13161         if (BNXT_NEW_RM(bp)) {
13162                 cp += bnxt_get_ulp_msix_num(bp);
13163                 stats += bnxt_get_ulp_stat_ctxs(bp);
13164         }
13165         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
13166                                      stats, vnics);
13167 }
13168
13169 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
13170 {
13171         if (bp->bar2) {
13172                 pci_iounmap(pdev, bp->bar2);
13173                 bp->bar2 = NULL;
13174         }
13175
13176         if (bp->bar1) {
13177                 pci_iounmap(pdev, bp->bar1);
13178                 bp->bar1 = NULL;
13179         }
13180
13181         if (bp->bar0) {
13182                 pci_iounmap(pdev, bp->bar0);
13183                 bp->bar0 = NULL;
13184         }
13185 }
13186
13187 static void bnxt_cleanup_pci(struct bnxt *bp)
13188 {
13189         bnxt_unmap_bars(bp, bp->pdev);
13190         pci_release_regions(bp->pdev);
13191         if (pci_is_enabled(bp->pdev))
13192                 pci_disable_device(bp->pdev);
13193 }
13194
13195 static void bnxt_init_dflt_coal(struct bnxt *bp)
13196 {
13197         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
13198         struct bnxt_coal *coal;
13199         u16 flags = 0;
13200
13201         if (coal_cap->cmpl_params &
13202             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
13203                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
13204
13205         /* Tick values in microseconds.
13206          * 1 coal_buf x bufs_per_record = 1 completion record.
13207          */
13208         coal = &bp->rx_coal;
13209         coal->coal_ticks = 10;
13210         coal->coal_bufs = 30;
13211         coal->coal_ticks_irq = 1;
13212         coal->coal_bufs_irq = 2;
13213         coal->idle_thresh = 50;
13214         coal->bufs_per_record = 2;
13215         coal->budget = 64;              /* NAPI budget */
13216         coal->flags = flags;
13217
13218         coal = &bp->tx_coal;
13219         coal->coal_ticks = 28;
13220         coal->coal_bufs = 30;
13221         coal->coal_ticks_irq = 2;
13222         coal->coal_bufs_irq = 2;
13223         coal->bufs_per_record = 1;
13224         coal->flags = flags;
13225
13226         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
13227 }
13228
13229 /* Return true if FW pre-reserves 1 VNIC per function */
13230 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
13231 {
13232         u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
13233
13234         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13235             (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
13236                 return true;
13237         if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13238             (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
13239                 return true;
13240         return false;
13241 }
13242
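/* Firmware init phase 1: establish HWRM communication with firmware,
 * recovering it if unresponsive, then reset the function.
 */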
13243 static int bnxt_fw_init_one_p1(struct bnxt *bp)
13244 {
13245         int rc;
13246
13247         bp->fw_cap = 0;
13248         rc = bnxt_hwrm_ver_get(bp);
13249         /* FW may be unresponsive after an FLR.  An FLR must complete within
13250          * 100 msec, so wait that long before continuing with recovery.
13251          */
13252         if (rc)
13253                 msleep(100);
13254         bnxt_try_map_fw_health_reg(bp);
13255         if (rc) {
13256                 rc = bnxt_try_recover_fw(bp);
13257                 if (rc)
13258                         return rc;
13259                 rc = bnxt_hwrm_ver_get(bp);
13260                 if (rc)
13261                         return rc;
13262         }
13263
13264         bnxt_nvm_cfg_ver_get(bp);
13265
13266         rc = bnxt_hwrm_func_reset(bp);
13267         if (rc)
13268                 return -ENODEV;
13269
13270         bnxt_hwrm_fw_set_time(bp);
13271         return 0;
13272 }
13273
13274 static int bnxt_fw_init_one_p2(struct bnxt *bp)
13275 {
13276         int rc;
13277
13278         /* Get the MAX capabilities for this function */
13279         rc = bnxt_hwrm_func_qcaps(bp);
13280         if (rc) {
13281                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13282                            rc);
13283                 return -ENODEV;
13284         }
13285
13286         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13287         if (rc)
13288                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13289                             rc);
13290
13291         if (bnxt_alloc_fw_health(bp)) {
13292                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
13293         } else {
13294                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
13295                 if (rc)
13296                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13297                                     rc);
13298         }
13299
13300         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13301         if (rc)
13302                 return -ENODEV;
13303
13304         if (bnxt_fw_pre_resv_vnics(bp))
13305                 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
13306
13307         bnxt_hwrm_func_qcfg(bp);
13308         bnxt_hwrm_vnic_qcaps(bp);
13309         bnxt_hwrm_port_led_qcaps(bp);
13310         bnxt_ethtool_init(bp);
13311         if (bp->fw_cap & BNXT_FW_CAP_PTP)
13312                 __bnxt_hwrm_ptp_qcfg(bp);
13313         bnxt_dcb_init(bp);
13314         bnxt_hwmon_init(bp);
13315         return 0;
13316 }
13317
13318 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
13319 {
13320         bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
13321         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
13322                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
13323                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
13324                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
13325         if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
13326                 bp->rss_hash_delta = bp->rss_hash_cfg;
13327         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
13328                 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
13329                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
13330                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
13331         }
13332 }
13333
13334 static void bnxt_set_dflt_rfs(struct bnxt *bp)
13335 {
13336         struct net_device *dev = bp->dev;
13337
13338         dev->hw_features &= ~NETIF_F_NTUPLE;
13339         dev->features &= ~NETIF_F_NTUPLE;
13340         bp->flags &= ~BNXT_FLAG_RFS;
13341         if (bnxt_rfs_supported(bp)) {
13342                 dev->hw_features |= NETIF_F_NTUPLE;
13343                 if (bnxt_rfs_capable(bp)) {
13344                         bp->flags |= BNXT_FLAG_RFS;
13345                         dev->features |= NETIF_F_NTUPLE;
13346                 }
13347         }
13348 }
13349
13350 static void bnxt_fw_init_one_p3(struct bnxt *bp)
13351 {
13352         struct pci_dev *pdev = bp->pdev;
13353
13354         bnxt_set_dflt_rss_hash_type(bp);
13355         bnxt_set_dflt_rfs(bp);
13356
13357         bnxt_get_wol_settings(bp);
13358         if (bp->flags & BNXT_FLAG_WOL_CAP)
13359                 device_set_wakeup_enable(&pdev->dev, bp->wol);
13360         else
13361                 device_set_wakeup_capable(&pdev->dev, false);
13362
13363         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
13364         bnxt_hwrm_coal_params_qcaps(bp);
13365 }
13366
13367 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
13368
13369 int bnxt_fw_init_one(struct bnxt *bp)
13370 {
13371         int rc;
13372
13373         rc = bnxt_fw_init_one_p1(bp);
13374         if (rc) {
13375                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
13376                 return rc;
13377         }
13378         rc = bnxt_fw_init_one_p2(bp);
13379         if (rc) {
13380                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
13381                 return rc;
13382         }
13383         rc = bnxt_probe_phy(bp, false);
13384         if (rc)
13385                 return rc;
13386         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
13387         if (rc)
13388                 return rc;
13389
13390         bnxt_fw_init_one_p3(bp);
13391         return 0;
13392 }
13393
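/* Execute one step of the firmware-provided reset sequence: write the
 * value for reg_idx to a register in config space, BAR0/BAR1 or a GRC
 * window, then apply the requested delay.
 */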
13394 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
13395 {
13396         struct bnxt_fw_health *fw_health = bp->fw_health;
13397         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
13398         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
13399         u32 reg_type, reg_off, delay_msecs;
13400
13401         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
13402         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
13403         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
13404         switch (reg_type) {
13405         case BNXT_FW_HEALTH_REG_TYPE_CFG:
13406                 pci_write_config_dword(bp->pdev, reg_off, val);
13407                 break;
13408         case BNXT_FW_HEALTH_REG_TYPE_GRC:
13409                 writel(reg_off & BNXT_GRC_BASE_MASK,
13410                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
13411                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
13412                 fallthrough;
13413         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
13414                 writel(val, bp->bar0 + reg_off);
13415                 break;
13416         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
13417                 writel(val, bp->bar1 + reg_off);
13418                 break;
13419         }
13420         if (delay_msecs) {
13421                 pci_read_config_dword(bp->pdev, 0, &val);
13422                 msleep(delay_msecs);
13423         }
13424 }
13425
13426 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
13427 {
13428         struct hwrm_func_qcfg_output *resp;
13429         struct hwrm_func_qcfg_input *req;
13430         bool result = true; /* firmware will enforce if unknown */
13431
13432         if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
13433                 return result;
13434
13435         if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
13436                 return result;
13437
13438         req->fid = cpu_to_le16(0xffff);
13439         resp = hwrm_req_hold(bp, req);
13440         if (!hwrm_req_send(bp, req))
13441                 result = !!(le16_to_cpu(resp->flags) &
13442                             FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
13443         hwrm_req_drop(bp, req);
13444         return result;
13445 }
13446
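/* Reset the firmware using whichever mechanism is available: an OP-TEE
 * assisted reset, the host register write sequence, or an HWRM_FW_RESET
 * command to the co-processor.
 */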
13447 static void bnxt_reset_all(struct bnxt *bp)
13448 {
13449         struct bnxt_fw_health *fw_health = bp->fw_health;
13450         int i, rc;
13451
13452         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13453                 bnxt_fw_reset_via_optee(bp);
13454                 bp->fw_reset_timestamp = jiffies;
13455                 return;
13456         }
13457
13458         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
13459                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
13460                         bnxt_fw_reset_writel(bp, i);
13461         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
13462                 struct hwrm_fw_reset_input *req;
13463
13464                 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
13465                 if (!rc) {
13466                         req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
13467                         req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
13468                         req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
13469                         req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
13470                         rc = hwrm_req_send(bp, req);
13471                 }
13472                 if (rc != -ENODEV)
13473                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
13474         }
13475         bp->fw_reset_timestamp = jiffies;
13476 }
13477
13478 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
13479 {
13480         return time_after(jiffies, bp->fw_reset_timestamp +
13481                           (bp->fw_reset_max_dsecs * HZ / 10));
13482 }
13483
13484 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
13485 {
13486         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13487         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
13488                 bnxt_ulp_start(bp, rc);
13489                 bnxt_dl_health_fw_status_update(bp, false);
13490         }
13491         bp->fw_reset_state = 0;
13492         dev_close(bp->dev);
13493 }
13494
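/* Firmware reset state machine, run from delayed work.  It steps through
 * waiting for VFs to unregister, resetting firmware, re-enabling the PCI
 * device, polling firmware for readiness, and finally reopening the NIC;
 * each state re-queues the work until the reset completes or is aborted.
 */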
13495 static void bnxt_fw_reset_task(struct work_struct *work)
13496 {
13497         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
13498         int rc = 0;
13499
13500         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13501                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
13502                 return;
13503         }
13504
13505         switch (bp->fw_reset_state) {
13506         case BNXT_FW_RESET_STATE_POLL_VF: {
13507                 int n = bnxt_get_registered_vfs(bp);
13508                 int tmo;
13509
13510                 if (n < 0) {
13511                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
13512                                    n, jiffies_to_msecs(jiffies -
13513                                    bp->fw_reset_timestamp));
13514                         goto fw_reset_abort;
13515                 } else if (n > 0) {
13516                         if (bnxt_fw_reset_timeout(bp)) {
13517                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13518                                 bp->fw_reset_state = 0;
13519                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
13520                                            n);
13521                                 return;
13522                         }
13523                         bnxt_queue_fw_reset_work(bp, HZ / 10);
13524                         return;
13525                 }
13526                 bp->fw_reset_timestamp = jiffies;
13527                 rtnl_lock();
13528                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13529                         bnxt_fw_reset_abort(bp, rc);
13530                         rtnl_unlock();
13531                         return;
13532                 }
13533                 bnxt_fw_reset_close(bp);
13534                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13535                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13536                         tmo = HZ / 10;
13537                 } else {
13538                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13539                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
13540                 }
13541                 rtnl_unlock();
13542                 bnxt_queue_fw_reset_work(bp, tmo);
13543                 return;
13544         }
13545         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
13546                 u32 val;
13547
13548                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
13549                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
13550                     !bnxt_fw_reset_timeout(bp)) {
13551                         bnxt_queue_fw_reset_work(bp, HZ / 5);
13552                         return;
13553                 }
13554
13555                 if (!bp->fw_health->primary) {
13556                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
13557
13558                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13559                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13560                         return;
13561                 }
13562                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13563         }
13564                 fallthrough;
13565         case BNXT_FW_RESET_STATE_RESET_FW:
13566                 bnxt_reset_all(bp);
13567                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13568                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
13569                 return;
13570         case BNXT_FW_RESET_STATE_ENABLE_DEV:
13571                 bnxt_inv_fw_health_reg(bp);
13572                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
13573                     !bp->fw_reset_min_dsecs) {
13574                         u16 val;
13575
13576                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13577                         if (val == 0xffff) {
13578                                 if (bnxt_fw_reset_timeout(bp)) {
13579                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
13580                                         rc = -ETIMEDOUT;
13581                                         goto fw_reset_abort;
13582                                 }
13583                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
13584                                 return;
13585                         }
13586                 }
13587                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13588                 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
13589                 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
13590                     !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
13591                         bnxt_dl_remote_reload(bp);
13592                 if (pci_enable_device(bp->pdev)) {
13593                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
13594                         rc = -ENODEV;
13595                         goto fw_reset_abort;
13596                 }
13597                 pci_set_master(bp->pdev);
13598                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
13599                 fallthrough;
13600         case BNXT_FW_RESET_STATE_POLL_FW:
13601                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
13602                 rc = bnxt_hwrm_poll(bp);
13603                 if (rc) {
13604                         if (bnxt_fw_reset_timeout(bp)) {
13605                                 netdev_err(bp->dev, "Firmware reset aborted\n");
13606                                 goto fw_reset_abort_status;
13607                         }
13608                         bnxt_queue_fw_reset_work(bp, HZ / 5);
13609                         return;
13610                 }
13611                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
13612                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
13613                 fallthrough;
13614         case BNXT_FW_RESET_STATE_OPENING:
13615                 while (!rtnl_trylock()) {
13616                         bnxt_queue_fw_reset_work(bp, HZ / 10);
13617                         return;
13618                 }
13619                 rc = bnxt_open(bp->dev);
13620                 if (rc) {
13621                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
13622                         bnxt_fw_reset_abort(bp, rc);
13623                         rtnl_unlock();
13624                         return;
13625                 }
13626
13627                 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
13628                     bp->fw_health->enabled) {
13629                         bp->fw_health->last_fw_reset_cnt =
13630                                 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13631                 }
13632                 bp->fw_reset_state = 0;
13633                 /* Make sure fw_reset_state is 0 before clearing the flag */
13634                 smp_mb__before_atomic();
13635                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13636                 bnxt_ulp_start(bp, 0);
13637                 bnxt_reenable_sriov(bp);
13638                 bnxt_vf_reps_alloc(bp);
13639                 bnxt_vf_reps_open(bp);
13640                 bnxt_ptp_reapply_pps(bp);
13641                 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
13642                 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
13643                         bnxt_dl_health_fw_recovery_done(bp);
13644                         bnxt_dl_health_fw_status_update(bp, true);
13645                 }
13646                 rtnl_unlock();
13647                 break;
13648         }
13649         return;
13650
13651 fw_reset_abort_status:
13652         if (bp->fw_health->status_reliable ||
13653             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
13654                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
13655
13656                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
13657         }
13658 fw_reset_abort:
13659         rtnl_lock();
13660         bnxt_fw_reset_abort(bp, rc);
13661         rtnl_unlock();
13662 }
13663
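/* One-time PCI setup at probe time: enable the device, claim the BAR
 * regions, select a 64-bit DMA mask (with 32-bit fallback), map BAR0
 * and BAR4, and initialize the work items, locks, timer and default
 * ring sizes used by the rest of the driver.
 */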
13664 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
13665 {
13666         int rc;
13667         struct bnxt *bp = netdev_priv(dev);
13668
13669         SET_NETDEV_DEV(dev, &pdev->dev);
13670
13671         /* enable device (incl. PCI PM wakeup) and bus-mastering */
13672         rc = pci_enable_device(pdev);
13673         if (rc) {
13674                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
13675                 goto init_err;
13676         }
13677
13678         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13679                 dev_err(&pdev->dev,
13680                         "Cannot find PCI device base address, aborting\n");
13681                 rc = -ENODEV;
13682                 goto init_err_disable;
13683         }
13684
13685         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13686         if (rc) {
13687                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
13688                 goto init_err_disable;
13689         }
13690
13691         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
13692             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
13693                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
13694                 rc = -EIO;
13695                 goto init_err_release;
13696         }
13697
13698         pci_set_master(pdev);
13699
13700         bp->dev = dev;
13701         bp->pdev = pdev;
13702
13703         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
13704          * determines the BAR size.
13705          */
13706         bp->bar0 = pci_ioremap_bar(pdev, 0);
13707         if (!bp->bar0) {
13708                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
13709                 rc = -ENOMEM;
13710                 goto init_err_release;
13711         }
13712
13713         bp->bar2 = pci_ioremap_bar(pdev, 4);
13714         if (!bp->bar2) {
13715                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
13716                 rc = -ENOMEM;
13717                 goto init_err_release;
13718         }
13719
13720         INIT_WORK(&bp->sp_task, bnxt_sp_task);
13721         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
13722
13723         spin_lock_init(&bp->ntp_fltr_lock);
13724 #if BITS_PER_LONG == 32
13725         spin_lock_init(&bp->db_lock);
13726 #endif
13727
13728         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
13729         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
13730
13731         timer_setup(&bp->timer, bnxt_timer, 0);
13732         bp->current_interval = BNXT_TIMER_INTERVAL;
13733
13734         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
13735         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
13736
13737         clear_bit(BNXT_STATE_OPEN, &bp->state);
13738         return 0;
13739
13740 init_err_release:
13741         bnxt_unmap_bars(bp, pdev);
13742         pci_release_regions(pdev);
13743
13744 init_err_disable:
13745         pci_disable_device(pdev);
13746
13747 init_err:
13748         return rc;
13749 }
13750
13751 /* rtnl_lock held */
13752 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
13753 {
13754         struct sockaddr *addr = p;
13755         struct bnxt *bp = netdev_priv(dev);
13756         int rc = 0;
13757
13758         if (!is_valid_ether_addr(addr->sa_data))
13759                 return -EADDRNOTAVAIL;
13760
13761         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
13762                 return 0;
13763
13764         rc = bnxt_approve_mac(bp, addr->sa_data, true);
13765         if (rc)
13766                 return rc;
13767
13768         eth_hw_addr_set(dev, addr->sa_data);
13769         if (netif_running(dev)) {
13770                 bnxt_close_nic(bp, false, false);
13771                 rc = bnxt_open_nic(bp, false, false);
13772         }
13773
13774         return rc;
13775 }
13776
13777 /* rtnl_lock held */
13778 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
13779 {
13780         struct bnxt *bp = netdev_priv(dev);
13781
13782         if (netif_running(dev))
13783                 bnxt_close_nic(bp, true, false);
13784
13785         dev->mtu = new_mtu;
13786         bnxt_set_ring_params(bp);
13787
13788         if (netif_running(dev))
13789                 return bnxt_open_nic(bp, true, false);
13790
13791         return 0;
13792 }
13793
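/* Configure the device for @tc traffic classes (mqprio).  The NIC is
 * closed and reopened if necessary so that the TX and completion rings
 * can be resized to tx_nr_rings_per_tc * tc plus any XDP TX rings.
 */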
13794 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
13795 {
13796         struct bnxt *bp = netdev_priv(dev);
13797         bool sh = false;
13798         int rc, tx_cp;
13799
13800         if (tc > bp->max_tc) {
13801                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
13802                            tc, bp->max_tc);
13803                 return -EINVAL;
13804         }
13805
13806         if (bp->num_tc == tc)
13807                 return 0;
13808
13809         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
13810                 sh = true;
13811
13812         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
13813                               sh, tc, bp->tx_nr_rings_xdp);
13814         if (rc)
13815                 return rc;
13816
13817         /* Need to close the device and do HW resource re-allocations */
13818         if (netif_running(bp->dev))
13819                 bnxt_close_nic(bp, true, false);
13820
13821         if (tc) {
13822                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
13823                 netdev_set_num_tc(dev, tc);
13824                 bp->num_tc = tc;
13825         } else {
13826                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13827                 netdev_reset_tc(dev);
13828                 bp->num_tc = 0;
13829         }
13830         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13831         tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
13832         bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
13833                                tx_cp + bp->rx_nr_rings;
13834
13835         if (netif_running(bp->dev))
13836                 return bnxt_open_nic(bp, true, false);
13837
13838         return 0;
13839 }
13840
13841 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
13842                                   void *cb_priv)
13843 {
13844         struct bnxt *bp = cb_priv;
13845
13846         if (!bnxt_tc_flower_enabled(bp) ||
13847             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
13848                 return -EOPNOTSUPP;
13849
13850         switch (type) {
13851         case TC_SETUP_CLSFLOWER:
13852                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
13853         default:
13854                 return -EOPNOTSUPP;
13855         }
13856 }
13857
13858 LIST_HEAD(bnxt_block_cb_list);
13859
13860 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
13861                          void *type_data)
13862 {
13863         struct bnxt *bp = netdev_priv(dev);
13864
13865         switch (type) {
13866         case TC_SETUP_BLOCK:
13867                 return flow_block_cb_setup_simple(type_data,
13868                                                   &bnxt_block_cb_list,
13869                                                   bnxt_setup_tc_block_cb,
13870                                                   bp, bp, true);
13871         case TC_SETUP_QDISC_MQPRIO: {
13872                 struct tc_mqprio_qopt *mqprio = type_data;
13873
13874                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
13875
13876                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
13877         }
13878         default:
13879                 return -EOPNOTSUPP;
13880         }
13881 }
13882
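/* Hash table index for an ntuple filter: the raw skb RX hash when an
 * skb is supplied, otherwise a software Toeplitz hash of the flow keys
 * using VNIC 0's RSS hash key.
 */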
13883 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
13884                             const struct sk_buff *skb)
13885 {
13886         struct bnxt_vnic_info *vnic;
13887
13888         if (skb)
13889                 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
13890
13891         vnic = &bp->vnic_info[0];
13892         return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
13893 }
13894
13895 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
13896                            u32 idx)
13897 {
13898         struct hlist_head *head;
13899         int bit_id;
13900
13901         spin_lock_bh(&bp->ntp_fltr_lock);
13902         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
13903         if (bit_id < 0) {
13904                 spin_unlock_bh(&bp->ntp_fltr_lock);
13905                 return -ENOMEM;
13906         }
13907
13908         fltr->base.sw_id = (u16)bit_id;
13909         fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
13910         fltr->base.flags |= BNXT_ACT_RING_DST;
13911         head = &bp->ntp_fltr_hash_tbl[idx];
13912         hlist_add_head_rcu(&fltr->base.hash, head);
13913         set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
13914         bp->ntp_fltr_count++;
13915         spin_unlock_bh(&bp->ntp_fltr_lock);
13916         return 0;
13917 }
13918
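/* Two ntuple filters match only if they compare the same set of fields
 * and the selected IP addresses and L4 ports, the protocols, the
 * control flags and the underlying L2 filter are all identical.
 */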
13919 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
13920                             struct bnxt_ntuple_filter *f2)
13921 {
13922         struct flow_keys *keys1 = &f1->fkeys;
13923         struct flow_keys *keys2 = &f2->fkeys;
13924
13925         if (f1->ntuple_flags != f2->ntuple_flags)
13926                 return false;
13927
13928         if (keys1->basic.n_proto != keys2->basic.n_proto ||
13929             keys1->basic.ip_proto != keys2->basic.ip_proto)
13930                 return false;
13931
13932         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
13933                 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
13934                      keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
13935                     ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
13936                      keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
13937                         return false;
13938         } else {
13939                 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
13940                      memcmp(&keys1->addrs.v6addrs.src,
13941                             &keys2->addrs.v6addrs.src,
13942                             sizeof(keys1->addrs.v6addrs.src))) ||
13943                     ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
13944                      memcmp(&keys1->addrs.v6addrs.dst,
13945                             &keys2->addrs.v6addrs.dst,
13946                             sizeof(keys1->addrs.v6addrs.dst))))
13947                         return false;
13948         }
13949
13950         if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
13951              keys1->ports.src != keys2->ports.src) ||
13952             ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
13953              keys1->ports.dst != keys2->ports.dst))
13954                 return false;
13955
13956         if (keys1->control.flags == keys2->control.flags &&
13957             f1->l2_fltr == f2->l2_fltr)
13958                 return true;
13959
13960         return false;
13961 }
13962
13963 struct bnxt_ntuple_filter *
13964 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
13965                                 struct bnxt_ntuple_filter *fltr, u32 idx)
13966 {
13967         struct bnxt_ntuple_filter *f;
13968         struct hlist_head *head;
13969
13970         head = &bp->ntp_fltr_hash_tbl[idx];
13971         hlist_for_each_entry_rcu(f, head, base.hash) {
13972                 if (bnxt_fltr_match(f, fltr))
13973                         return f;
13974         }
13975         return NULL;
13976 }
13977
13978 #ifdef CONFIG_RFS_ACCEL
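/* .ndo_rx_flow_steer handler (aRFS): dissect the flow, take a
 * reference on a matching L2 filter, and insert an ntuple filter
 * steering the flow to @rxq_index.  The HW filter itself is allocated
 * later from the sp_task (see bnxt_cfg_ntp_filters()).
 */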
13979 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
13980                               u16 rxq_index, u32 flow_id)
13981 {
13982         struct bnxt *bp = netdev_priv(dev);
13983         struct bnxt_ntuple_filter *fltr, *new_fltr;
13984         struct flow_keys *fkeys;
13985         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
13986         struct bnxt_l2_filter *l2_fltr;
13987         int rc = 0, idx;
13988         u32 flags;
13989
13990         if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
13991                 l2_fltr = bp->vnic_info[0].l2_filters[0];
13992                 atomic_inc(&l2_fltr->refcnt);
13993         } else {
13994                 struct bnxt_l2_key key;
13995
13996                 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
13997                 key.vlan = 0;
13998                 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
13999                 if (!l2_fltr)
14000                         return -EINVAL;
14001                 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
14002                         bnxt_del_l2_filter(bp, l2_fltr);
14003                         return -EINVAL;
14004                 }
14005         }
14006         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
14007         if (!new_fltr) {
14008                 bnxt_del_l2_filter(bp, l2_fltr);
14009                 return -ENOMEM;
14010         }
14011
14012         fkeys = &new_fltr->fkeys;
14013         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
14014                 rc = -EPROTONOSUPPORT;
14015                 goto err_free;
14016         }
14017
14018         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
14019              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
14020             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
14021              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
14022                 rc = -EPROTONOSUPPORT;
14023                 goto err_free;
14024         }
14025         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
14026             bp->hwrm_spec_code < 0x10601) {
14027                 rc = -EPROTONOSUPPORT;
14028                 goto err_free;
14029         }
14030         flags = fkeys->control.flags;
14031         if (((flags & FLOW_DIS_ENCAPSULATION) &&
14032              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
14033                 rc = -EPROTONOSUPPORT;
14034                 goto err_free;
14035         }
14036
14037         new_fltr->l2_fltr = l2_fltr;
14038         new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
14039
14040         idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
14041         rcu_read_lock();
14042         fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
14043         if (fltr) {
14044                 rc = fltr->base.sw_id;
14045                 rcu_read_unlock();
14046                 goto err_free;
14047         }
14048         rcu_read_unlock();
14049
14050         new_fltr->flow_id = flow_id;
14051         new_fltr->base.rxq = rxq_index;
14052         rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14053         if (!rc) {
14054                 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14055                 return new_fltr->base.sw_id;
14056         }
14057
14058 err_free:
14059         bnxt_del_l2_filter(bp, l2_fltr);
14060         kfree(new_fltr);
14061         return rc;
14062 }
14063 #endif
14064
14065 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
14066 {
14067         spin_lock_bh(&bp->ntp_fltr_lock);
14068         if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
14069                 spin_unlock_bh(&bp->ntp_fltr_lock);
14070                 return;
14071         }
14072         hlist_del_rcu(&fltr->base.hash);
14073         bp->ntp_fltr_count--;
14074         spin_unlock_bh(&bp->ntp_fltr_lock);
14075         bnxt_del_l2_filter(bp, fltr->l2_fltr);
14076         clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
14077         kfree_rcu(fltr, base.rcu);
14078 }
14079
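/* Walk the ntuple filter hash table: allocate HW filters for entries
 * not yet marked valid, and delete aged-out entries that
 * rps_may_expire_flow() reports as no longer in use.
 */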
14080 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
14081 {
14082 #ifdef CONFIG_RFS_ACCEL
14083         int i;
14084
14085         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
14086                 struct hlist_head *head;
14087                 struct hlist_node *tmp;
14088                 struct bnxt_ntuple_filter *fltr;
14089                 int rc;
14090
14091                 head = &bp->ntp_fltr_hash_tbl[i];
14092                 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
14093                         bool del = false;
14094
14095                         if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
14096                                 if (fltr->base.flags & BNXT_ACT_NO_AGING)
14097                                         continue;
14098                                 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
14099                                                         fltr->flow_id,
14100                                                         fltr->base.sw_id)) {
14101                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
14102                                                                          fltr);
14103                                         del = true;
14104                                 }
14105                         } else {
14106                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
14107                                                                        fltr);
14108                                 if (rc)
14109                                         del = true;
14110                                 else
14111                                         set_bit(BNXT_FLTR_VALID, &fltr->base.state);
14112                         }
14113
14114                         if (del)
14115                                 bnxt_del_ntp_filter(bp, fltr);
14116                 }
14117         }
14118 #endif
14119 }
14120
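/* udp_tunnel_nic callbacks: translate the tunnel type (VXLAN, GENEVE
 * or VXLAN-GPE) into the corresponding HWRM command and program or
 * free the UDP destination port in firmware.
 */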
14121 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
14122                                     unsigned int entry, struct udp_tunnel_info *ti)
14123 {
14124         struct bnxt *bp = netdev_priv(netdev);
14125         unsigned int cmd;
14126
14127         if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14128                 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
14129         else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14130                 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
14131         else
14132                 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
14133
14134         return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
14135 }
14136
14137 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
14138                                       unsigned int entry, struct udp_tunnel_info *ti)
14139 {
14140         struct bnxt *bp = netdev_priv(netdev);
14141         unsigned int cmd;
14142
14143         if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14144                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
14145         else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14146                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
14147         else
14148                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
14149
14150         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
14151 }
14152
14153 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
14154         .set_port       = bnxt_udp_tunnel_set_port,
14155         .unset_port     = bnxt_udp_tunnel_unset_port,
14156         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14157                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14158         .tables         = {
14159                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
14160                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14161         },
14162 }, bnxt_udp_tunnels_p7 = {
14163         .set_port       = bnxt_udp_tunnel_set_port,
14164         .unset_port     = bnxt_udp_tunnel_unset_port,
14165         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14166                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14167         .tables         = {
14168                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
14169                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14170                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
14171         },
14172 };
14173
14174 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
14175                                struct net_device *dev, u32 filter_mask,
14176                                int nlflags)
14177 {
14178         struct bnxt *bp = netdev_priv(dev);
14179
14180         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
14181                                        nlflags, filter_mask, NULL);
14182 }
14183
14184 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
14185                                u16 flags, struct netlink_ext_ack *extack)
14186 {
14187         struct bnxt *bp = netdev_priv(dev);
14188         struct nlattr *attr, *br_spec;
14189         int rem, rc = 0;
14190
14191         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
14192                 return -EOPNOTSUPP;
14193
14194         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
14195         if (!br_spec)
14196                 return -EINVAL;
14197
14198         nla_for_each_nested(attr, br_spec, rem) {
14199                 u16 mode;
14200
14201                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
14202                         continue;
14203
14204                 mode = nla_get_u16(attr);
14205                 if (mode == bp->br_mode)
14206                         break;
14207
14208                 rc = bnxt_hwrm_set_br_mode(bp, mode);
14209                 if (!rc)
14210                         bp->br_mode = mode;
14211                 break;
14212         }
14213         return rc;
14214 }
14215
14216 int bnxt_get_port_parent_id(struct net_device *dev,
14217                             struct netdev_phys_item_id *ppid)
14218 {
14219         struct bnxt *bp = netdev_priv(dev);
14220
14221         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
14222                 return -EOPNOTSUPP;
14223
14224         /* The PF and its VF-reps only support the switchdev framework */
14225         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
14226                 return -EOPNOTSUPP;
14227
14228         ppid->id_len = sizeof(bp->dsn);
14229         memcpy(ppid->id, bp->dsn, ppid->id_len);
14230
14231         return 0;
14232 }
14233
14234 static const struct net_device_ops bnxt_netdev_ops = {
14235         .ndo_open               = bnxt_open,
14236         .ndo_start_xmit         = bnxt_start_xmit,
14237         .ndo_stop               = bnxt_close,
14238         .ndo_get_stats64        = bnxt_get_stats64,
14239         .ndo_set_rx_mode        = bnxt_set_rx_mode,
14240         .ndo_eth_ioctl          = bnxt_ioctl,
14241         .ndo_validate_addr      = eth_validate_addr,
14242         .ndo_set_mac_address    = bnxt_change_mac_addr,
14243         .ndo_change_mtu         = bnxt_change_mtu,
14244         .ndo_fix_features       = bnxt_fix_features,
14245         .ndo_set_features       = bnxt_set_features,
14246         .ndo_features_check     = bnxt_features_check,
14247         .ndo_tx_timeout         = bnxt_tx_timeout,
14248 #ifdef CONFIG_BNXT_SRIOV
14249         .ndo_get_vf_config      = bnxt_get_vf_config,
14250         .ndo_set_vf_mac         = bnxt_set_vf_mac,
14251         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
14252         .ndo_set_vf_rate        = bnxt_set_vf_bw,
14253         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
14254         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
14255         .ndo_set_vf_trust       = bnxt_set_vf_trust,
14256 #endif
14257         .ndo_setup_tc           = bnxt_setup_tc,
14258 #ifdef CONFIG_RFS_ACCEL
14259         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
14260 #endif
14261         .ndo_bpf                = bnxt_xdp,
14262         .ndo_xdp_xmit           = bnxt_xdp_xmit,
14263         .ndo_bridge_getlink     = bnxt_bridge_getlink,
14264         .ndo_bridge_setlink     = bnxt_bridge_setlink,
14265 };
14266
14267 static void bnxt_remove_one(struct pci_dev *pdev)
14268 {
14269         struct net_device *dev = pci_get_drvdata(pdev);
14270         struct bnxt *bp = netdev_priv(dev);
14271
14272         if (BNXT_PF(bp))
14273                 bnxt_sriov_disable(bp);
14274
14275         bnxt_rdma_aux_device_uninit(bp);
14276
14277         bnxt_ptp_clear(bp);
14278         unregister_netdev(dev);
14279         bnxt_free_l2_filters(bp, true);
14280         bnxt_free_ntp_fltrs(bp, true);
14281         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14282         /* Flush any pending tasks */
14283         cancel_work_sync(&bp->sp_task);
14284         cancel_delayed_work_sync(&bp->fw_reset_task);
14285         bp->sp_event = 0;
14286
14287         bnxt_dl_fw_reporters_destroy(bp);
14288         bnxt_dl_unregister(bp);
14289         bnxt_shutdown_tc(bp);
14290
14291         bnxt_clear_int_mode(bp);
14292         bnxt_hwrm_func_drv_unrgtr(bp);
14293         bnxt_free_hwrm_resources(bp);
14294         bnxt_hwmon_uninit(bp);
14295         bnxt_ethtool_free(bp);
14296         bnxt_dcb_free(bp);
14297         kfree(bp->ptp_cfg);
14298         bp->ptp_cfg = NULL;
14299         kfree(bp->fw_health);
14300         bp->fw_health = NULL;
14301         bnxt_cleanup_pci(bp);
14302         bnxt_free_ctx_mem(bp);
14303         kfree(bp->rss_indir_tbl);
14304         bp->rss_indir_tbl = NULL;
14305         bnxt_free_port_stats(bp);
14306         free_netdev(dev);
14307 }
14308
14309 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
14310 {
14311         int rc = 0;
14312         struct bnxt_link_info *link_info = &bp->link_info;
14313
14314         bp->phy_flags = 0;
14315         rc = bnxt_hwrm_phy_qcaps(bp);
14316         if (rc) {
14317                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
14318                            rc);
14319                 return rc;
14320         }
14321         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
14322                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
14323         else
14324                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
14325         if (!fw_dflt)
14326                 return 0;
14327
14328         mutex_lock(&bp->link_lock);
14329         rc = bnxt_update_link(bp, false);
14330         if (rc) {
14331                 mutex_unlock(&bp->link_lock);
14332                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
14333                            rc);
14334                 return rc;
14335         }
14336
14337         /* Older firmware does not have supported_auto_speeds, so assume
14338          * that all supported speeds can be autonegotiated.
14339          */
14340         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
14341                 link_info->support_auto_speeds = link_info->support_speeds;
14342
14343         bnxt_init_ethtool_link_settings(bp);
14344         mutex_unlock(&bp->link_lock);
14345         return 0;
14346 }
14347
14348 static int bnxt_get_max_irq(struct pci_dev *pdev)
14349 {
14350         u16 ctrl;
14351
14352         if (!pdev->msix_cap)
14353                 return 1;
14354
14355         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
14356         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
14357 }
14358
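/* Compute the maximum usable RX/TX/completion rings from the HW
 * resources reported by firmware, capped by the available IRQs and
 * stat contexts after subtracting those reserved for the ULP driver.
 */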
14359 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14360                                 int *max_cp)
14361 {
14362         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
14363         int max_ring_grps = 0, max_irq;
14364
14365         *max_tx = hw_resc->max_tx_rings;
14366         *max_rx = hw_resc->max_rx_rings;
14367         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
14368         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
14369                         bnxt_get_ulp_msix_num(bp),
14370                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
14371         if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14372                 *max_cp = min_t(int, *max_cp, max_irq);
14373         max_ring_grps = hw_resc->max_hw_ring_grps;
14374         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
14375                 *max_cp -= 1;
14376                 *max_rx -= 2;
14377         }
14378         if (bp->flags & BNXT_FLAG_AGG_RINGS)
14379                 *max_rx >>= 1;
14380         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
14381                 int rc;
14382
14383                 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14384                 if (rc) {
14385                         *max_rx = 0;
14386                         *max_tx = 0;
14387                 }
14388                 /* On P5+ chips, the max_cp output param is the number of available NQs */
14389                 *max_cp = max_irq;
14390         }
14391         *max_rx = min_t(int, *max_rx, max_ring_grps);
14392 }
14393
14394 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
14395 {
14396         int rx, tx, cp;
14397
14398         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
14399         *max_rx = rx;
14400         *max_tx = tx;
14401         if (!rx || !tx || !cp)
14402                 return -ENOMEM;
14403
14404         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
14405 }
14406
14407 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14408                                bool shared)
14409 {
14410         int rc;
14411
14412         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14413         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
14414                 /* Not enough rings, try disabling agg rings. */
14415                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
14416                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14417                 if (rc) {
14418                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
14419                         bp->flags |= BNXT_FLAG_AGG_RINGS;
14420                         return rc;
14421                 }
14422                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
14423                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14424                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14425                 bnxt_set_ring_params(bp);
14426         }
14427
14428         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
14429                 int max_cp, max_stat, max_irq;
14430
14431                 /* Reserve minimum resources for RoCE */
14432                 max_cp = bnxt_get_max_func_cp_rings(bp);
14433                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
14434                 max_irq = bnxt_get_max_func_irqs(bp);
14435                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
14436                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
14437                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
14438                         return 0;
14439
14440                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
14441                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
14442                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
14443                 max_cp = min_t(int, max_cp, max_irq);
14444                 max_cp = min_t(int, max_cp, max_stat);
14445                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
14446                 if (rc)
14447                         rc = 0;
14448         }
14449         return rc;
14450 }
14451
14452 /* In the initial default shared ring setting, each shared ring must
14453  * have an RX/TX ring pair.
14454  */
14455 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
14456 {
14457         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
14458         bp->rx_nr_rings = bp->cp_nr_rings;
14459         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
14460         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14461 }
14462
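/* Pick the initial default ring counts: one ring in a kdump kernel,
 * otherwise the default RSS queue count, scaled down on multi-port
 * cards and clamped to what firmware can actually reserve.
 */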
14463 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
14464 {
14465         int dflt_rings, max_rx_rings, max_tx_rings, rc;
14466
14467         if (!bnxt_can_reserve_rings(bp))
14468                 return 0;
14469
14470         if (sh)
14471                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
14472         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
14473         /* Reduce default rings on multi-port cards so that total default
14474          * rings do not exceed CPU count.
14475          */
14476         if (bp->port_count > 1) {
14477                 int max_rings =
14478                         max_t(int, num_online_cpus() / bp->port_count, 1);
14479
14480                 dflt_rings = min_t(int, dflt_rings, max_rings);
14481         }
14482         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
14483         if (rc)
14484                 return rc;
14485         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
14486         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
14487         if (sh)
14488                 bnxt_trim_dflt_sh_rings(bp);
14489         else
14490                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
14491         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14492
14493         rc = __bnxt_reserve_rings(bp);
14494         if (rc && rc != -ENODEV)
14495                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
14496         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14497         if (sh)
14498                 bnxt_trim_dflt_sh_rings(bp);
14499
14500         /* Rings may have been trimmed; re-reserve the trimmed rings. */
14501         if (bnxt_need_reserve_rings(bp)) {
14502                 rc = __bnxt_reserve_rings(bp);
14503                 if (rc && rc != -ENODEV)
14504                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
14505                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14506         }
14507         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
14508                 bp->rx_nr_rings++;
14509                 bp->cp_nr_rings++;
14510         }
14511         if (rc) {
14512                 bp->tx_nr_rings = 0;
14513                 bp->rx_nr_rings = 0;
14514         }
14515         return rc;
14516 }
14517
14518 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
14519 {
14520         int rc;
14521
14522         if (bp->tx_nr_rings)
14523                 return 0;
14524
14525         bnxt_ulp_irq_stop(bp);
14526         bnxt_clear_int_mode(bp);
14527         rc = bnxt_set_dflt_rings(bp, true);
14528         if (rc) {
14529                 if (BNXT_VF(bp) && rc == -ENODEV)
14530                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
14531                 else
14532                         netdev_err(bp->dev, "Not enough rings available.\n");
14533                 goto init_dflt_ring_err;
14534         }
14535         rc = bnxt_init_int_mode(bp);
14536         if (rc)
14537                 goto init_dflt_ring_err;
14538
14539         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14540
14541         bnxt_set_dflt_rfs(bp);
14542
14543 init_dflt_ring_err:
14544         bnxt_ulp_irq_restart(bp, rc);
14545         return rc;
14546 }
14547
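/* Re-query function capabilities and rebuild the interrupt mode,
 * restarting the NIC if it was running.  Caller must hold rtnl_lock.
 */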
14548 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
14549 {
14550         int rc;
14551
14552         ASSERT_RTNL();
14553         bnxt_hwrm_func_qcaps(bp);
14554
14555         if (netif_running(bp->dev))
14556                 __bnxt_close_nic(bp, true, false);
14557
14558         bnxt_ulp_irq_stop(bp);
14559         bnxt_clear_int_mode(bp);
14560         rc = bnxt_init_int_mode(bp);
14561         bnxt_ulp_irq_restart(bp, rc);
14562
14563         if (netif_running(bp->dev)) {
14564                 if (rc)
14565                         dev_close(bp->dev);
14566                 else
14567                         rc = bnxt_open_nic(bp, true, false);
14568         }
14569
14570         return rc;
14571 }
14572
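/* Set the initial MAC address: the PF uses the address reported by
 * firmware; a VF uses the PF-assigned MAC if valid, otherwise a random
 * address that the PF must strictly approve.
 */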
14573 static int bnxt_init_mac_addr(struct bnxt *bp)
14574 {
14575         int rc = 0;
14576
14577         if (BNXT_PF(bp)) {
14578                 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
14579         } else {
14580 #ifdef CONFIG_BNXT_SRIOV
14581                 struct bnxt_vf_info *vf = &bp->vf;
14582                 bool strict_approval = true;
14583
14584                 if (is_valid_ether_addr(vf->mac_addr)) {
14585                         /* overwrite netdev dev_addr with admin VF MAC */
14586                         eth_hw_addr_set(bp->dev, vf->mac_addr);
14587                         /* Older PF driver or firmware may not approve this
14588                          * correctly.
14589                          */
14590                         strict_approval = false;
14591                 } else {
14592                         eth_hw_addr_random(bp->dev);
14593                 }
14594                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
14595 #endif
14596         }
14597         return rc;
14598 }
14599
14600 static void bnxt_vpd_read_info(struct bnxt *bp)
14601 {
14602         struct pci_dev *pdev = bp->pdev;
14603         unsigned int vpd_size, kw_len;
14604         int pos, size;
14605         u8 *vpd_data;
14606
14607         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
14608         if (IS_ERR(vpd_data)) {
14609                 pci_warn(pdev, "Unable to read VPD\n");
14610                 return;
14611         }
14612
14613         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
14614                                            PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
14615         if (pos < 0)
14616                 goto read_sn;
14617
14618         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
14619         memcpy(bp->board_partno, &vpd_data[pos], size);
14620
14621 read_sn:
14622         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
14623                                            PCI_VPD_RO_KEYWORD_SERIALNO,
14624                                            &kw_len);
14625         if (pos < 0)
14626                 goto exit;
14627
14628         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
14629         memcpy(bp->board_serialno, &vpd_data[pos], size);
14630 exit:
14631         kfree(vpd_data);
14632 }
14633
14634 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
14635 {
14636         struct pci_dev *pdev = bp->pdev;
14637         u64 qword;
14638
14639         qword = pci_get_dsn(pdev);
14640         if (!qword) {
14641                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
14642                 return -EOPNOTSUPP;
14643         }
14644
14645         put_unaligned_le64(qword, dsn);
14646
14647         bp->flags |= BNXT_FLAG_DSN_VALID;
14648         return 0;
14649 }
14650
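/* Map the doorbell BAR (BAR index 2).  Deferred until firmware init
 * has reported the doorbell size in bp->db_size.
 */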
14651 static int bnxt_map_db_bar(struct bnxt *bp)
14652 {
14653         if (!bp->db_size)
14654                 return -ENODEV;
14655         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
14656         if (!bp->bar1)
14657                 return -ENOMEM;
14658         return 0;
14659 }
14660
14661 void bnxt_print_device_info(struct bnxt *bp)
14662 {
14663         netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
14664                     board_info[bp->board_idx].name,
14665                     (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
14666
14667         pcie_print_link_status(bp->pdev);
14668 }
14669
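/* Main PCI probe entry point: allocate the netdev, set up the board,
 * bring up the HWRM channel and firmware, discover capabilities and
 * features, pick default rings and interrupts, then register with
 * devlink and the networking core.
 */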
14670 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
14671 {
14672         struct net_device *dev;
14673         struct bnxt *bp;
14674         int rc, max_irqs;
14675
14676         if (pci_is_bridge(pdev))
14677                 return -ENODEV;
14678
14679         /* Clear any DMA transactions left pending by the crashed kernel
14680          * while loading the driver in the capture kernel.
14681          */
14682         if (is_kdump_kernel()) {
14683                 pci_clear_master(pdev);
14684                 pcie_flr(pdev);
14685         }
14686
14687         max_irqs = bnxt_get_max_irq(pdev);
14688         dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
14689                                  max_irqs);
14690         if (!dev)
14691                 return -ENOMEM;
14692
14693         bp = netdev_priv(dev);
14694         bp->board_idx = ent->driver_data;
14695         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
14696         bnxt_set_max_func_irqs(bp, max_irqs);
14697
14698         if (bnxt_vf_pciid(bp->board_idx))
14699                 bp->flags |= BNXT_FLAG_VF;
14700
14701         /* No devlink port registration in case of a VF */
14702         if (BNXT_PF(bp))
14703                 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
14704
14705         if (pdev->msix_cap)
14706                 bp->flags |= BNXT_FLAG_MSIX_CAP;
14707
14708         rc = bnxt_init_board(pdev, dev);
14709         if (rc < 0)
14710                 goto init_err_free;
14711
14712         dev->netdev_ops = &bnxt_netdev_ops;
14713         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
14714         dev->ethtool_ops = &bnxt_ethtool_ops;
14715         pci_set_drvdata(pdev, dev);
14716
14717         rc = bnxt_alloc_hwrm_resources(bp);
14718         if (rc)
14719                 goto init_err_pci_clean;
14720
14721         mutex_init(&bp->hwrm_cmd_lock);
14722         mutex_init(&bp->link_lock);
14723
14724         rc = bnxt_fw_init_one_p1(bp);
14725         if (rc)
14726                 goto init_err_pci_clean;
14727
14728         if (BNXT_PF(bp))
14729                 bnxt_vpd_read_info(bp);
14730
14731         if (BNXT_CHIP_P5_PLUS(bp)) {
14732                 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
14733                 if (BNXT_CHIP_P7(bp))
14734                         bp->flags |= BNXT_FLAG_CHIP_P7;
14735         }
14736
14737         rc = bnxt_alloc_rss_indir_tbl(bp);
14738         if (rc)
14739                 goto init_err_pci_clean;
14740
14741         rc = bnxt_fw_init_one_p2(bp);
14742         if (rc)
14743                 goto init_err_pci_clean;
14744
14745         rc = bnxt_map_db_bar(bp);
14746         if (rc) {
14747                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
14748                         rc);
14749                 goto init_err_pci_clean;
14750         }
14751
14752         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
14753                            NETIF_F_TSO | NETIF_F_TSO6 |
14754                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
14755                            NETIF_F_GSO_IPXIP4 |
14756                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
14757                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
14758                            NETIF_F_RXCSUM | NETIF_F_GRO;
14759         if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
14760                 dev->hw_features |= NETIF_F_GSO_UDP_L4;
14761
14762         if (BNXT_SUPPORTS_TPA(bp))
14763                 dev->hw_features |= NETIF_F_LRO;
14764
14765         dev->hw_enc_features =
14766                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
14767                         NETIF_F_TSO | NETIF_F_TSO6 |
14768                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
14769                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
14770                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
14771         if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
14772                 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
14773         if (bp->flags & BNXT_FLAG_CHIP_P7)
14774                 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
14775         else
14776                 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
14777
14778         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
14779                                     NETIF_F_GSO_GRE_CSUM;
14780         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
14781         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
14782                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
14783         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
14784                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
14785         if (BNXT_SUPPORTS_TPA(bp))
14786                 dev->hw_features |= NETIF_F_GRO_HW;
14787         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
14788         if (dev->features & NETIF_F_GRO_HW)
14789                 dev->features &= ~NETIF_F_LRO;
14790         dev->priv_flags |= IFF_UNICAST_FLT;
14791
14792         netif_set_tso_max_size(dev, GSO_MAX_SIZE);
14793
14794         dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
14795                             NETDEV_XDP_ACT_RX_SG;
14796
14797 #ifdef CONFIG_BNXT_SRIOV
14798         init_waitqueue_head(&bp->sriov_cfg_wait);
14799 #endif
14800         if (BNXT_SUPPORTS_TPA(bp)) {
14801                 bp->gro_func = bnxt_gro_func_5730x;
14802                 if (BNXT_CHIP_P4(bp))
14803                         bp->gro_func = bnxt_gro_func_5731x;
14804                 else if (BNXT_CHIP_P5_PLUS(bp))
14805                         bp->gro_func = bnxt_gro_func_5750x;
14806         }
14807         if (!BNXT_CHIP_P4_PLUS(bp))
14808                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
14809
14810         rc = bnxt_init_mac_addr(bp);
14811         if (rc) {
14812                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
14813                 rc = -EADDRNOTAVAIL;
14814                 goto init_err_pci_clean;
14815         }
14816
14817         if (BNXT_PF(bp)) {
14818                 /* Read the adapter's DSN to use as the eswitch switch_id */
14819                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
14820         }
14821
14822         /* MTU range: 60 - FW defined max */
14823         dev->min_mtu = ETH_ZLEN;
14824         dev->max_mtu = bp->max_mtu;
14825
14826         rc = bnxt_probe_phy(bp, true);
14827         if (rc)
14828                 goto init_err_pci_clean;
14829
14830         bnxt_init_l2_fltr_tbl(bp);
14831         bnxt_set_rx_skb_mode(bp, false);
14832         bnxt_set_tpa_flags(bp);
14833         bnxt_set_ring_params(bp);
14834         rc = bnxt_set_dflt_rings(bp, true);
14835         if (rc) {
14836                 if (BNXT_VF(bp) && rc == -ENODEV) {
14837                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
14838                 } else {
14839                         netdev_err(bp->dev, "Not enough rings available.\n");
14840                         rc = -ENOMEM;
14841                 }
14842                 goto init_err_pci_clean;
14843         }
14844
14845         bnxt_fw_init_one_p3(bp);
14846
14847         bnxt_init_dflt_coal(bp);
14848
14849         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
14850                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
14851
14852         rc = bnxt_init_int_mode(bp);
14853         if (rc)
14854                 goto init_err_pci_clean;
14855
14856         /* No TC has been set yet and rings may have been trimmed due to
14857          * limited MSIX, so we re-initialize the TX rings per TC.
14858          */
14859         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14860
14861         if (BNXT_PF(bp)) {
14862                 if (!bnxt_pf_wq) {
14863                         bnxt_pf_wq =
14864                                 create_singlethread_workqueue("bnxt_pf_wq");
14865                         if (!bnxt_pf_wq) {
14866                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
14867                                 rc = -ENOMEM;
14868                                 goto init_err_pci_clean;
14869                         }
14870                 }
14871                 rc = bnxt_init_tc(bp);
14872                 if (rc)
14873                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
14874                                    rc);
14875         }
14876
14877         bnxt_inv_fw_health_reg(bp);
14878         rc = bnxt_dl_register(bp);
14879         if (rc)
14880                 goto init_err_dl;
14881
14882         rc = register_netdev(dev);
14883         if (rc)
14884                 goto init_err_cleanup;
14885
14886         bnxt_dl_fw_reporters_create(bp);
14887
14888         bnxt_rdma_aux_device_init(bp);
14889
14890         bnxt_print_device_info(bp);
14891
14892         pci_save_state(pdev);
14893
14894         return 0;
14895 init_err_cleanup:
14896         bnxt_dl_unregister(bp);
14897 init_err_dl:
14898         bnxt_shutdown_tc(bp);
14899         bnxt_clear_int_mode(bp);
14900
14901 init_err_pci_clean:
14902         bnxt_hwrm_func_drv_unrgtr(bp);
14903         bnxt_free_hwrm_resources(bp);
14904         bnxt_hwmon_uninit(bp);
14905         bnxt_ethtool_free(bp);
14906         bnxt_ptp_clear(bp);
14907         kfree(bp->ptp_cfg);
14908         bp->ptp_cfg = NULL;
14909         kfree(bp->fw_health);
14910         bp->fw_health = NULL;
14911         bnxt_cleanup_pci(bp);
14912         bnxt_free_ctx_mem(bp);
14913         kfree(bp->rss_indir_tbl);
14914         bp->rss_indir_tbl = NULL;
14915
14916 init_err_free:
14917         free_netdev(dev);
14918         return rc;
14919 }
14920
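/* PCI shutdown handler: close the netdev, tear down interrupts and,
 * on system power-off, arm wake-on-LAN and put the device in D3hot.
 */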
14921 static void bnxt_shutdown(struct pci_dev *pdev)
14922 {
14923         struct net_device *dev = pci_get_drvdata(pdev);
14924         struct bnxt *bp;
14925
14926         if (!dev)
14927                 return;
14928
14929         rtnl_lock();
14930         bp = netdev_priv(dev);
14931         if (!bp)
14932                 goto shutdown_exit;
14933
14934         if (netif_running(dev))
14935                 dev_close(dev);
14936
14937         bnxt_clear_int_mode(bp);
14938         pci_disable_device(pdev);
14939
14940         if (system_state == SYSTEM_POWER_OFF) {
14941                 pci_wake_from_d3(pdev, bp->wol);
14942                 pci_set_power_state(pdev, PCI_D3hot);
14943         }
14944
14945 shutdown_exit:
14946         rtnl_unlock();
14947 }
14948
14949 #ifdef CONFIG_PM_SLEEP
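/* System suspend: stop the ULP driver, close the NIC, unregister the
 * driver from firmware and free context memory; bnxt_resume() then
 * re-registers and reopens from scratch.
 */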
14950 static int bnxt_suspend(struct device *device)
14951 {
14952         struct net_device *dev = dev_get_drvdata(device);
14953         struct bnxt *bp = netdev_priv(dev);
14954         int rc = 0;
14955
14956         rtnl_lock();
14957         bnxt_ulp_stop(bp);
14958         if (netif_running(dev)) {
14959                 netif_device_detach(dev);
14960                 rc = bnxt_close(dev);
14961         }
14962         bnxt_hwrm_func_drv_unrgtr(bp);
14963         pci_disable_device(bp->pdev);
14964         bnxt_free_ctx_mem(bp);
14965         rtnl_unlock();
14966         return rc;
14967 }
14968
14969 static int bnxt_resume(struct device *device)
14970 {
14971         struct net_device *dev = dev_get_drvdata(device);
14972         struct bnxt *bp = netdev_priv(dev);
14973         int rc = 0;
14974
14975         rtnl_lock();
14976         rc = pci_enable_device(bp->pdev);
14977         if (rc) {
14978                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
14979                            rc);
14980                 goto resume_exit;
14981         }
14982         pci_set_master(bp->pdev);
14983         if (bnxt_hwrm_ver_get(bp)) {
14984                 rc = -ENODEV;
14985                 goto resume_exit;
14986         }
14987         rc = bnxt_hwrm_func_reset(bp);
14988         if (rc) {
14989                 rc = -EBUSY;
14990                 goto resume_exit;
14991         }
14992
14993         rc = bnxt_hwrm_func_qcaps(bp);
14994         if (rc)
14995                 goto resume_exit;
14996
14997         bnxt_clear_reservations(bp, true);
14998
14999         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
15000                 rc = -ENODEV;
15001                 goto resume_exit;
15002         }
15003
15004         bnxt_get_wol_settings(bp);
15005         if (netif_running(dev)) {
15006                 rc = bnxt_open(dev);
15007                 if (!rc)
15008                         netif_device_attach(dev);
15009         }
15010
15011 resume_exit:
15012         bnxt_ulp_start(bp, rc);
15013         if (!rc)
15014                 bnxt_reenable_sriov(bp);
15015         rtnl_unlock();
15016         return rc;
15017 }
15018
15019 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
15020 #define BNXT_PM_OPS (&bnxt_pm_ops)
15021
15022 #else
15023
15024 #define BNXT_PM_OPS NULL
15025
15026 #endif /* CONFIG_PM_SLEEP */
15027
15028 /**
15029  * bnxt_io_error_detected - called when PCI error is detected
15030  * @pdev: Pointer to PCI device
15031  * @state: The current pci connection state
15032  *
15033  * This function is called after a PCI bus error affecting
15034  * this device has been detected.
15035  */
15036 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
15037                                                pci_channel_state_t state)
15038 {
15039         struct net_device *netdev = pci_get_drvdata(pdev);
15040         struct bnxt *bp = netdev_priv(netdev);
15041
15042         netdev_info(netdev, "PCI I/O error detected\n");
15043
15044         rtnl_lock();
15045         netif_device_detach(netdev);
15046
15047         bnxt_ulp_stop(bp);
15048
15049         if (state == pci_channel_io_perm_failure) {
15050                 rtnl_unlock();
15051                 return PCI_ERS_RESULT_DISCONNECT;
15052         }
15053
15054         if (state == pci_channel_io_frozen)
15055                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
15056
15057         if (netif_running(netdev))
15058                 bnxt_close(netdev);
15059
15060         if (pci_is_enabled(pdev))
15061                 pci_disable_device(pdev);
15062         bnxt_free_ctx_mem(bp);
15063         rtnl_unlock();
15064
15065         /* Request a slot reset. */
15066         return PCI_ERS_RESULT_NEED_RESET;
15067 }
15068
15069 /**
15070  * bnxt_io_slot_reset - called after the pci bus has been reset.
15071  * @pdev: Pointer to PCI device
15072  *
15073  * Restart the card from scratch, as if from a cold-boot.
15074  * At this point, the card has experienced a hard reset,
15075  * followed by fixups by BIOS, and has its config space
15076  * set up identically to what it was at cold boot.
15077  */
15078 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
15079 {
15080         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
15081         struct net_device *netdev = pci_get_drvdata(pdev);
15082         struct bnxt *bp = netdev_priv(netdev);
15083         int retry = 0;
15084         int err = 0;
15085         int off;
15086
15087         netdev_info(bp->dev, "PCI Slot Reset\n");
15088
15089         rtnl_lock();
15090
15091         if (pci_enable_device(pdev)) {
15092                 dev_err(&pdev->dev,
15093                         "Cannot re-enable PCI device after reset.\n");
15094         } else {
15095                 pci_set_master(pdev);
15096                 /* Upon fatal error, the device's internal logic that latches the
15097                  * BAR values is reset and is restored only when the BARs are
15098                  * rewritten.
15099                  *
15100                  * Since pci_restore_state() does not rewrite a BAR whose value
15101                  * matches the saved value, the driver must write the BARs to 0
15102                  * to force a restore after a fatal error.
15103                  */
15104                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
15105                                        &bp->state)) {
15106                         for (off = PCI_BASE_ADDRESS_0;
15107                              off <= PCI_BASE_ADDRESS_5; off += 4)
15108                                 pci_write_config_dword(bp->pdev, off, 0);
15109                 }
15110                 pci_restore_state(pdev);
15111                 pci_save_state(pdev);
15112
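                /* The cached mapping of the firmware health status
                 * register may be stale after the reset; invalidate it
                 * and try to map the register again.
                 */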
                bnxt_inv_fw_health_reg(bp);
                bnxt_try_map_fw_health_reg(bp);

                /* In some PCIe AER scenarios, firmware may take up to
                 * 10 seconds to become ready, so keep retrying.
                 */
                do {
                        err = bnxt_try_recover_fw(bp);
                        if (!err)
                                break;
                        retry++;
                } while (retry < BNXT_FW_SLOT_RESET_RETRY);

                if (err) {
                        dev_err(&pdev->dev, "Firmware not ready\n");
                        goto reset_exit;
                }

                err = bnxt_hwrm_func_reset(bp);
                if (!err)
                        result = PCI_ERS_RESULT_RECOVERED;

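                /* Rebuild the interrupt configuration: quiesce upper-layer
                 * protocol (ULP) IRQs, tear down and re-initialize the IRQ
                 * mode, then let the ULP driver restart based on err.
                 */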
                bnxt_ulp_irq_stop(bp);
                bnxt_clear_int_mode(bp);
                err = bnxt_init_int_mode(bp);
                bnxt_ulp_irq_restart(bp, err);
        }

reset_exit:
        bnxt_clear_reservations(bp, true);
        rtnl_unlock();

        return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
        int err;

        netdev_info(bp->dev, "PCI Slot Resume\n");
        rtnl_lock();

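        /* Re-query function capabilities from the firmware; they may
         * have changed across the reset.  Only reopen the device if
         * the query succeeds and it was running before the error.
         */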
        err = bnxt_hwrm_func_qcaps(bp);
        if (!err && netif_running(netdev))
                err = bnxt_open(netdev);

        bnxt_ulp_start(bp, err);
        if (!err) {
                bnxt_reenable_sriov(bp);
                netif_device_attach(netdev);
        }

        rtnl_unlock();
}

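/* PCI error recovery (AER) callbacks, invoked by the PCI core in
 * order: error_detected, then slot_reset, then resume.
 */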
static const struct pci_error_handlers bnxt_err_handler = {
        .error_detected = bnxt_io_error_detected,
        .slot_reset     = bnxt_io_slot_reset,
        .resume         = bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnxt_pci_tbl,
        .probe          = bnxt_init_one,
        .remove         = bnxt_remove_one,
        .shutdown       = bnxt_shutdown,
        .driver.pm      = BNXT_PM_OPS,
        .err_handler    = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
        .sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
        int err;

        bnxt_debug_init();
        err = pci_register_driver(&bnxt_pci_driver);
        if (err) {
                bnxt_debug_exit();
                return err;
        }

        return 0;
}

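/* Module teardown: unregister the PCI driver first, destroy the PF
 * workqueue if one was created, then remove the debugfs entries.
 */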
static void __exit bnxt_exit(void)
{
        pci_unregister_driver(&bnxt_pci_driver);
        if (bnxt_pf_wq)
                destroy_workqueue(bnxt_pf_wq);
        bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);