/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_C_VF_HV,
        NETXTREME_E_VF_HV,
        NETXTREME_E_P5_VF,
        NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
        [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
        ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};

static struct workqueue_struct *bnxt_pf_wq;

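/* Return true if the board index identifies an SR-IOV virtual function,
 * including the Hyper-V and P5 VF variants listed in board_idx above.
 */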
static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
                idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
                idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

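/* Doorbell helpers: P5 chips take 64-bit NQ/CQ doorbells via writeq(),
 * while older chips use the legacy 32-bit completion ring doorbell
 * encoded with the DB_CP_* flags above.
 */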
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

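/* TX length hint table, indexed by packet length in 512-byte units
 * (length >> 9 in bnxt_start_xmit()).  The hint gives the hardware a
 * coarse size class for the packet's data.
 */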
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

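/* Fetch the CFA action from the skb's HW port mux metadata dst, if
 * present; this is likely how VF representor traffic (see bnxt_vfr.h)
 * is steered to the right function.  Returns 0 when no metadata is set.
 */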
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

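/* Main transmit routine.  A small packet (up to tx_push_thresh bytes)
 * sent on an otherwise empty ring is "pushed" inline through the
 * doorbell BAR; all other packets are DMA-mapped into a long TX BD
 * plus an extended BD carrying VLAN, LSO, checksum and PTP flags.
 */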
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 802.1Q and 802.1ad VLAN offloads;
                 * the QINQ1, QINQ2, QINQ3 vlan headers are deprecated.
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
                        if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
                                atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }
        }

        if (unlikely(skb->no_fcs))
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
            !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        skb_tx_timestamp(skb);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_db_write(bp, &txr->tx_db, prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        if (BNXT_TX_PTP_IS_SET(lflags))
                atomic_inc(&bp->ptp_cfg->tx_avail);

        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

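/* TX completion: unmap and free nr_pkts completed packets, credit the
 * BQL queue, then re-wake the TX queue if it was stopped and enough
 * descriptors are free again.  On P5 chips, freeing an skb awaiting a
 * PTP TX timestamp may be deferred to the timestamp completion.
 */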
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                bool compl_deferred = false;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                                if (!bnxt_get_tx_ts_p5(bp, skb))
                                        compl_deferred = true;
                                else
                                        atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                if (!compl_deferred)
                        dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

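/* RX buffer allocators: page-mode buffers come from the ring's
 * page_pool, normal buffers are kmalloc'ed; both are DMA-mapped with
 * DMA_ATTR_WEAK_ORDERING.
 */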
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

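/* Recycle an RX buffer: repost the buffer at @cons at the current
 * producer slot so the hardware can reuse it, e.g. when a replacement
 * buffer cannot be allocated or the packet is dropped.
 */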
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

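/* Return a run of aggregation buffers to the ring, e.g. when a TPA or
 * jumbo frame is aborted.  Buffer indexes are recovered from the
 * completion ring (or the per-TPA agg array on P5 chips) and reposted
 * at the current producer positions.
 */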
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
        page_pool_release_page(rxr->page_pool, page);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

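/* Attach @agg_bufs aggregation pages to @skb as page frags.  If a
 * replacement page cannot be allocated mid-way, the partially built
 * skb is dropped and the remaining buffers are recycled.
 */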
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

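/* Copy-break path for small packets: build a fresh skb and memcpy the
 * payload out of the still-mapped RX buffer, leaving the buffer itself
 * on the ring.
 */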
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

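/* Schedule a ring reset (or a full reset on P5 chips) after an RX ring
 * error.  rx_next_cons is set to the invalid value 0xffff so stale
 * completions on this ring are not acted on before the reset runs.
 */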
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

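/* Handle a TPA_START completion: record the aggregation state (hash,
 * metadata, header info) in the tpa_info slot for this agg ID, and swap
 * the receive buffer at @cons with the slot's spare buffer so the ring
 * stays full while the aggregation is in flight.
 */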
1260 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1261                            struct rx_tpa_start_cmp *tpa_start,
1262                            struct rx_tpa_start_cmp_ext *tpa_start1)
1263 {
1264         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1265         struct bnxt_tpa_info *tpa_info;
1266         u16 cons, prod, agg_id;
1267         struct rx_bd *prod_bd;
1268         dma_addr_t mapping;
1269
1270         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1271                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1272                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1273         } else {
1274                 agg_id = TPA_START_AGG_ID(tpa_start);
1275         }
1276         cons = tpa_start->rx_tpa_start_cmp_opaque;
1277         prod = rxr->rx_prod;
1278         cons_rx_buf = &rxr->rx_buf_ring[cons];
1279         prod_rx_buf = &rxr->rx_buf_ring[prod];
1280         tpa_info = &rxr->rx_tpa[agg_id];
1281
1282         if (unlikely(cons != rxr->rx_next_cons ||
1283                      TPA_START_ERROR(tpa_start))) {
1284                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1285                             cons, rxr->rx_next_cons,
1286                             TPA_START_ERROR_CODE(tpa_start1));
1287                 bnxt_sched_reset(bp, rxr);
1288                 return;
1289         }
1290         /* Store cfa_code in tpa_info to use in tpa_end
1291          * completion processing.
1292          */
1293         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1294         prod_rx_buf->data = tpa_info->data;
1295         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1296
1297         mapping = tpa_info->mapping;
1298         prod_rx_buf->mapping = mapping;
1299
1300         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1301
1302         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1303
1304         tpa_info->data = cons_rx_buf->data;
1305         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1306         cons_rx_buf->data = NULL;
1307         tpa_info->mapping = cons_rx_buf->mapping;
1308
1309         tpa_info->len =
1310                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1311                                 RX_TPA_START_CMP_LEN_SHIFT;
1312         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1313                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1314
1315                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1316                 tpa_info->gso_type = SKB_GSO_TCPV4;
1317                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1318                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1319                         tpa_info->gso_type = SKB_GSO_TCPV6;
1320                 tpa_info->rss_hash =
1321                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1322         } else {
1323                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1324                 tpa_info->gso_type = 0;
1325                 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1326         }
1327         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1328         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1329         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1330         tpa_info->agg_count = 0;
1331
1332         rxr->rx_prod = NEXT_RX(prod);
1333         cons = NEXT_RX(cons);
1334         rxr->rx_next_cons = NEXT_RX(cons);
1335         cons_rx_buf = &rxr->rx_buf_ring[cons];
1336
1337         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1338         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1339         cons_rx_buf->data = NULL;
1340 }
1341
1342 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1343 {
1344         if (agg_bufs)
1345                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1346 }
1347
1348 #ifdef CONFIG_INET
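     /* For an aggregated tunnel packet, pick the UDP tunnel GSO type based
      * on whether the outer UDP header carries a checksum.  skb->data is
      * expected to point at the outer network header here.
      */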
1349 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1350 {
1351         struct udphdr *uh = NULL;
1352
1353         if (ip_proto == htons(ETH_P_IP)) {
1354                 struct iphdr *iph = (struct iphdr *)skb->data;
1355
1356                 if (iph->protocol == IPPROTO_UDP)
1357                         uh = (struct udphdr *)(iph + 1);
1358         } else {
1359                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1360
1361                 if (iph->nexthdr == IPPROTO_UDP)
1362                         uh = (struct udphdr *)(iph + 1);
1363         }
1364         if (uh) {
1365                 if (uh->check)
1366                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1367                 else
1368                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1369         }
1370 }
1371 #endif
1372
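     /* GRO fixup for 5731x chips: recover the inner header offsets from the
      * TPA hdr_info, compensate for the extra 4 bytes carried by internal
      * loopback packets, and prime the TCP pseudo-header checksum so that
      * tcp_gro_complete() can finish the packet.
      */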
1373 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1374                                            int payload_off, int tcp_ts,
1375                                            struct sk_buff *skb)
1376 {
1377 #ifdef CONFIG_INET
1378         struct tcphdr *th;
1379         int len, nw_off;
1380         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1381         u32 hdr_info = tpa_info->hdr_info;
1382         bool loopback = false;
1383
1384         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1385         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1386         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1387
1388         /* If the packet is an internal loopback packet, the offsets will
1389          * have an extra 4 bytes.
1390          */
1391         if (inner_mac_off == 4) {
1392                 loopback = true;
1393         } else if (inner_mac_off > 4) {
1394                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1395                                             ETH_HLEN - 2));
1396
1397                 /* We only support inner IPv4/IPv6.  If we don't see the
1398                  * correct protocol ID, it must be a loopback packet where
1399                  * the offsets are off by 4.
1400                  */
1401                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1402                         loopback = true;
1403         }
1404         if (loopback) {
1405                 /* internal loopback packet, subtract 4 from all offsets */
1406                 inner_ip_off -= 4;
1407                 inner_mac_off -= 4;
1408                 outer_ip_off -= 4;
1409         }
1410
1411         nw_off = inner_ip_off - ETH_HLEN;
1412         skb_set_network_header(skb, nw_off);
1413         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1414                 struct ipv6hdr *iph = ipv6_hdr(skb);
1415
1416                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1417                 len = skb->len - skb_transport_offset(skb);
1418                 th = tcp_hdr(skb);
1419                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1420         } else {
1421                 struct iphdr *iph = ip_hdr(skb);
1422
1423                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1424                 len = skb->len - skb_transport_offset(skb);
1425                 th = tcp_hdr(skb);
1426                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1427         }
1428
1429         if (inner_mac_off) { /* tunnel */
1430                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1431                                             ETH_HLEN - 2));
1432
1433                 bnxt_gro_tunnel(skb, proto);
1434         }
1435 #endif
1436         return skb;
1437 }
1438
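     /* GRO fixup for 5750x chips: only the network and transport header
      * offsets need to be set from hdr_info; unlike the 5731x variant, no
      * TCP pseudo-header checksum priming is done here.
      */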
1439 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1440                                            int payload_off, int tcp_ts,
1441                                            struct sk_buff *skb)
1442 {
1443 #ifdef CONFIG_INET
1444         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1445         u32 hdr_info = tpa_info->hdr_info;
1446         int iphdr_len, nw_off;
1447
1448         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1449         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1450         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1451
1452         nw_off = inner_ip_off - ETH_HLEN;
1453         skb_set_network_header(skb, nw_off);
1454         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1455                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1456         skb_set_transport_header(skb, nw_off + iphdr_len);
1457
1458         if (inner_mac_off) { /* tunnel */
1459                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1460                                             ETH_HLEN - 2));
1461
1462                 bnxt_gro_tunnel(skb, proto);
1463         }
1464 #endif
1465         return skb;
1466 }
1467
1468 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1469 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1470
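     /* GRO fixup for 5730x chips: no hdr_info is available, so the header
      * offsets are derived from the completion's payload offset, the fixed
      * IPv4/IPv6 plus TCP header sizes, and an optional 12-byte TCP
      * timestamp option.
      */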
1471 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1472                                            int payload_off, int tcp_ts,
1473                                            struct sk_buff *skb)
1474 {
1475 #ifdef CONFIG_INET
1476         struct tcphdr *th;
1477         int len, nw_off, tcp_opt_len = 0;
1478
1479         if (tcp_ts)
1480                 tcp_opt_len = 12;
1481
1482         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1483                 struct iphdr *iph;
1484
1485                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1486                          ETH_HLEN;
1487                 skb_set_network_header(skb, nw_off);
1488                 iph = ip_hdr(skb);
1489                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1490                 len = skb->len - skb_transport_offset(skb);
1491                 th = tcp_hdr(skb);
1492                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1493         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1494                 struct ipv6hdr *iph;
1495
1496                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1497                          ETH_HLEN;
1498                 skb_set_network_header(skb, nw_off);
1499                 iph = ipv6_hdr(skb);
1500                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1501                 len = skb->len - skb_transport_offset(skb);
1502                 th = tcp_hdr(skb);
1503                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1504         } else {
1505                 dev_kfree_skb_any(skb);
1506                 return NULL;
1507         }
1508
1509         if (nw_off) /* tunnel */
1510                 bnxt_gro_tunnel(skb, skb->protocol);
1511 #endif
1512         return skb;
1513 }
1514
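     /* Finish GRO processing of a completed TPA packet.  Single-segment
      * aggregations need no fixup; otherwise the chip-specific gro_func
      * rebuilds the header offsets before calling tcp_gro_complete().
      */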
1515 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1516                                            struct bnxt_tpa_info *tpa_info,
1517                                            struct rx_tpa_end_cmp *tpa_end,
1518                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1519                                            struct sk_buff *skb)
1520 {
1521 #ifdef CONFIG_INET
1522         int payload_off;
1523         u16 segs;
1524
1525         segs = TPA_END_TPA_SEGS(tpa_end);
1526         if (segs == 1)
1527                 return skb;
1528
1529         NAPI_GRO_CB(skb)->count = segs;
1530         skb_shinfo(skb)->gso_size =
1531                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1532         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1533         if (bp->flags & BNXT_FLAG_CHIP_P5)
1534                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1535         else
1536                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1537         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1538         if (likely(skb))
1539                 tcp_gro_complete(skb);
1540 #endif
1541         return skb;
1542 }
1543
1544 /* Given the cfa_code of a received packet, determine which
1545  * netdev (vf-rep or PF) the packet is destined to.
1546  */
1547 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1548 {
1549         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1550
1551         /* if vf-rep dev is NULL, the packet must belong to the PF */
1552         return dev ? dev : bp->dev;
1553 }
1554
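     /* Handle a TPA_END completion: find the bnxt_tpa_info set up by
      * bnxt_tpa_start(), build an skb around the aggregated data (copying
      * small packets, otherwise swapping in a freshly allocated buffer),
      * attach any aggregation pages, then apply RSS hash, VLAN, checksum
      * and optional GRO fixups before returning the skb to the caller.
      */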
1555 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1556                                            struct bnxt_cp_ring_info *cpr,
1557                                            u32 *raw_cons,
1558                                            struct rx_tpa_end_cmp *tpa_end,
1559                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1560                                            u8 *event)
1561 {
1562         struct bnxt_napi *bnapi = cpr->bnapi;
1563         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1564         u8 *data_ptr, agg_bufs;
1565         unsigned int len;
1566         struct bnxt_tpa_info *tpa_info;
1567         dma_addr_t mapping;
1568         struct sk_buff *skb;
1569         u16 idx = 0, agg_id;
1570         void *data;
1571         bool gro;
1572
1573         if (unlikely(bnapi->in_reset)) {
1574                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1575
1576                 if (rc < 0)
1577                         return ERR_PTR(-EBUSY);
1578                 return NULL;
1579         }
1580
1581         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1582                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1583                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1584                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1585                 tpa_info = &rxr->rx_tpa[agg_id];
1586                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1587                         netdev_warn(bp->dev, "TPA end agg_bufs %d != expected agg_bufs %d\n",
1588                                     agg_bufs, tpa_info->agg_count);
1589                         agg_bufs = tpa_info->agg_count;
1590                 }
1591                 tpa_info->agg_count = 0;
1592                 *event |= BNXT_AGG_EVENT;
1593                 bnxt_free_agg_idx(rxr, agg_id);
1594                 idx = agg_id;
1595                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1596         } else {
1597                 agg_id = TPA_END_AGG_ID(tpa_end);
1598                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1599                 tpa_info = &rxr->rx_tpa[agg_id];
1600                 idx = RING_CMP(*raw_cons);
1601                 if (agg_bufs) {
1602                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1603                                 return ERR_PTR(-EBUSY);
1604
1605                         *event |= BNXT_AGG_EVENT;
1606                         idx = NEXT_CMP(idx);
1607                 }
1608                 gro = !!TPA_END_GRO(tpa_end);
1609         }
1610         data = tpa_info->data;
1611         data_ptr = tpa_info->data_ptr;
1612         prefetch(data_ptr);
1613         len = tpa_info->len;
1614         mapping = tpa_info->mapping;
1615
1616         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1617                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1618                 if (agg_bufs > MAX_SKB_FRAGS)
1619                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1620                                     agg_bufs, (int)MAX_SKB_FRAGS);
1621                 return NULL;
1622         }
1623
1624         if (len <= bp->rx_copy_thresh) {
1625                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1626                 if (!skb) {
1627                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1628                         return NULL;
1629                 }
1630         } else {
1631                 u8 *new_data;
1632                 dma_addr_t new_mapping;
1633
1634                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1635                 if (!new_data) {
1636                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1637                         return NULL;
1638                 }
1639
1640                 tpa_info->data = new_data;
1641                 tpa_info->data_ptr = new_data + bp->rx_offset;
1642                 tpa_info->mapping = new_mapping;
1643
1644                 skb = build_skb(data, 0);
1645                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1646                                        bp->rx_buf_use_size, bp->rx_dir,
1647                                        DMA_ATTR_WEAK_ORDERING);
1648
1649                 if (!skb) {
1650                         kfree(data);
1651                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1652                         return NULL;
1653                 }
1654                 skb_reserve(skb, bp->rx_offset);
1655                 skb_put(skb, len);
1656         }
1657
1658         if (agg_bufs) {
1659                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1660                 if (!skb) {
1661                         /* Page reuse already handled by bnxt_rx_pages(). */
1662                         return NULL;
1663                 }
1664         }
1665
1666         skb->protocol =
1667                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1668
1669         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1670                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1671
1672         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1673             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1674                 u16 vlan_proto = tpa_info->metadata >>
1675                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1676                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1677
1678                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1679         }
1680
1681         skb_checksum_none_assert(skb);
1682         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1683                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1684                 skb->csum_level =
1685                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1686         }
1687
1688         if (gro)
1689                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1690
1691         return skb;
1692 }
1693
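     /* Stash a P5 TPA aggregation completion in the per-aggregation array
      * so that bnxt_tpa_end() can consume all buffers for the flow at once.
      */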
1694 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1695                          struct rx_agg_cmp *rx_agg)
1696 {
1697         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1698         struct bnxt_tpa_info *tpa_info;
1699
1700         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1701         tpa_info = &rxr->rx_tpa[agg_id];
1702         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1703         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1704 }
1705
1706 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1707                              struct sk_buff *skb)
1708 {
1709         if (skb->dev != bp->dev) {
1710                 /* this packet belongs to a vf-rep */
1711                 bnxt_vf_rep_rx(bp, skb);
1712                 return;
1713         }
1714         skb_record_rx_queue(skb, bnapi->index);
1715         napi_gro_receive(&bnapi->napi, skb);
1716 }
1717
1718 /* returns the following:
1719  * 1       - 1 packet successfully received
1720  * 0       - successful TPA_START, packet not completed yet
1721  * -EBUSY  - completion ring does not have all the agg buffers yet
1722  * -ENOMEM - packet aborted due to out of memory
1723  * -EIO    - packet aborted due to hw error indicated in BD
1724  */
1725 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1726                        u32 *raw_cons, u8 *event)
1727 {
1728         struct bnxt_napi *bnapi = cpr->bnapi;
1729         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1730         struct net_device *dev = bp->dev;
1731         struct rx_cmp *rxcmp;
1732         struct rx_cmp_ext *rxcmp1;
1733         u32 tmp_raw_cons = *raw_cons;
1734         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1735         struct bnxt_sw_rx_bd *rx_buf;
1736         unsigned int len;
1737         u8 *data_ptr, agg_bufs, cmp_type;
1738         dma_addr_t dma_addr;
1739         struct sk_buff *skb;
1740         u32 flags, misc;
1741         void *data;
1742         int rc = 0;
1743
1744         rxcmp = (struct rx_cmp *)
1745                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1746
1747         cmp_type = RX_CMP_TYPE(rxcmp);
1748
1749         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1750                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1751                 goto next_rx_no_prod_no_len;
1752         }
1753
1754         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1755         cp_cons = RING_CMP(tmp_raw_cons);
1756         rxcmp1 = (struct rx_cmp_ext *)
1757                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1758
1759         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1760                 return -EBUSY;
1761
1762         prod = rxr->rx_prod;
1763
1764         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1765                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1766                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1767
1768                 *event |= BNXT_RX_EVENT;
1769                 goto next_rx_no_prod_no_len;
1770
1771         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1772                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1773                                    (struct rx_tpa_end_cmp *)rxcmp,
1774                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1775
1776                 if (IS_ERR(skb))
1777                         return -EBUSY;
1778
1779                 rc = -ENOMEM;
1780                 if (likely(skb)) {
1781                         bnxt_deliver_skb(bp, bnapi, skb);
1782                         rc = 1;
1783                 }
1784                 *event |= BNXT_RX_EVENT;
1785                 goto next_rx_no_prod_no_len;
1786         }
1787
1788         cons = rxcmp->rx_cmp_opaque;
1789         if (unlikely(cons != rxr->rx_next_cons)) {
1790                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1791
1792                 /* 0xffff is a forced error, don't print a warning for it */
1793                 if (rxr->rx_next_cons != 0xffff)
1794                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1795                                     cons, rxr->rx_next_cons);
1796                 bnxt_sched_reset(bp, rxr);
1797                 if (rc1)
1798                         return rc1;
1799                 goto next_rx_no_prod_no_len;
1800         }
1801         rx_buf = &rxr->rx_buf_ring[cons];
1802         data = rx_buf->data;
1803         data_ptr = rx_buf->data_ptr;
1804         prefetch(data_ptr);
1805
1806         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1807         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1808
1809         if (agg_bufs) {
1810                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1811                         return -EBUSY;
1812
1813                 cp_cons = NEXT_CMP(cp_cons);
1814                 *event |= BNXT_AGG_EVENT;
1815         }
1816         *event |= BNXT_RX_EVENT;
1817
1818         rx_buf->data = NULL;
1819         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1820                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1821
1822                 bnxt_reuse_rx_data(rxr, cons, data);
1823                 if (agg_bufs)
1824                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1825                                                false);
1826
1827                 rc = -EIO;
1828                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1829                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1830                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1831                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1832                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1833                                                  rx_err);
1834                                 bnxt_sched_reset(bp, rxr);
1835                         }
1836                 }
1837                 goto next_rx_no_len;
1838         }
1839
1840         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1841         len = flags >> RX_CMP_LEN_SHIFT;
1842         dma_addr = rx_buf->mapping;
1843
1844         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1845                 rc = 1;
1846                 goto next_rx;
1847         }
1848
1849         if (len <= bp->rx_copy_thresh) {
1850                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1851                 bnxt_reuse_rx_data(rxr, cons, data);
1852                 if (!skb) {
1853                         if (agg_bufs)
1854                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1855                                                        agg_bufs, false);
1856                         rc = -ENOMEM;
1857                         goto next_rx;
1858                 }
1859         } else {
1860                 u32 payload;
1861
1862                 if (rx_buf->data_ptr == data_ptr)
1863                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1864                 else
1865                         payload = 0;
1866                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1867                                       payload | len);
1868                 if (!skb) {
1869                         rc = -ENOMEM;
1870                         goto next_rx;
1871                 }
1872         }
1873
1874         if (agg_bufs) {
1875                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1876                 if (!skb) {
1877                         rc = -ENOMEM;
1878                         goto next_rx;
1879                 }
1880         }
1881
1882         if (RX_CMP_HASH_VALID(rxcmp)) {
1883                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1884                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1885
1886                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1887                 if (hash_type != 1 && hash_type != 3)
1888                         type = PKT_HASH_TYPE_L3;
1889                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1890         }
1891
1892         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1893         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1894
1895         if ((rxcmp1->rx_cmp_flags2 &
1896              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1897             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1898                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1899                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1900                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1901
1902                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1903         }
1904
1905         skb_checksum_none_assert(skb);
1906         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1907                 if (dev->features & NETIF_F_RXCSUM) {
1908                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1909                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1910                 }
1911         } else {
1912                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1913                         if (dev->features & NETIF_F_RXCSUM)
1914                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1915                 }
1916         }
1917
1918         if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1919                      RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1920                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1921                         u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1922                         u64 ns, ts;
1923
1924                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1925                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1926
1927                                 spin_lock_bh(&ptp->ptp_lock);
1928                                 ns = timecounter_cyc2time(&ptp->tc, ts);
1929                                 spin_unlock_bh(&ptp->ptp_lock);
1930                                 memset(skb_hwtstamps(skb), 0,
1931                                        sizeof(*skb_hwtstamps(skb)));
1932                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1933                         }
1934                 }
1935         }
1936         bnxt_deliver_skb(bp, bnapi, skb);
1937         rc = 1;
1938
1939 next_rx:
1940         cpr->rx_packets += 1;
1941         cpr->rx_bytes += len;
1942
1943 next_rx_no_len:
1944         rxr->rx_prod = NEXT_RX(prod);
1945         rxr->rx_next_cons = NEXT_RX(cons);
1946
1947 next_rx_no_prod_no_len:
1948         *raw_cons = tmp_raw_cons;
1949
1950         return rc;
1951 }
1952
1953 /* In netpoll mode, if we are using a combined completion ring, we need to
1954  * discard the rx packets and recycle the buffers.
1955  */
1956 static int bnxt_force_rx_discard(struct bnxt *bp,
1957                                  struct bnxt_cp_ring_info *cpr,
1958                                  u32 *raw_cons, u8 *event)
1959 {
1960         u32 tmp_raw_cons = *raw_cons;
1961         struct rx_cmp_ext *rxcmp1;
1962         struct rx_cmp *rxcmp;
1963         u16 cp_cons;
1964         u8 cmp_type;
1965
1966         cp_cons = RING_CMP(tmp_raw_cons);
1967         rxcmp = (struct rx_cmp *)
1968                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1969
1970         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1971         cp_cons = RING_CMP(tmp_raw_cons);
1972         rxcmp1 = (struct rx_cmp_ext *)
1973                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1974
1975         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1976                 return -EBUSY;
1977
1978         cmp_type = RX_CMP_TYPE(rxcmp);
1979         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1980                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1981                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1982         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1983                 struct rx_tpa_end_cmp_ext *tpa_end1;
1984
1985                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1986                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1987                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1988         }
1989         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1990 }
1991
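     /* Read a firmware health register.  The register descriptor encodes
      * both the access type (config space, GRC window or BAR) and the
      * offset, e.g. bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG).
      */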
1992 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1993 {
1994         struct bnxt_fw_health *fw_health = bp->fw_health;
1995         u32 reg = fw_health->regs[reg_idx];
1996         u32 reg_type, reg_off, val = 0;
1997
1998         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1999         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2000         switch (reg_type) {
2001         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2002                 pci_read_config_dword(bp->pdev, reg_off, &val);
2003                 break;
2004         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2005                 reg_off = fw_health->mapped_regs[reg_idx];
2006                 fallthrough;
2007         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2008                 val = readl(bp->bar0 + reg_off);
2009                 break;
2010         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2011                 val = readl(bp->bar1 + reg_off);
2012                 break;
2013         }
2014         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2015                 val &= fw_health->fw_reset_inprog_reg_mask;
2016         return val;
2017 }
2018
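     /* Map a firmware aggregation ring ID back to its ring group index, or
      * return INVALID_HW_RING_ID if no RX ring owns that aggregation ring.
      */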
2019 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2020 {
2021         int i;
2022
2023         for (i = 0; i < bp->rx_nr_rings; i++) {
2024                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2025                 struct bnxt_ring_grp_info *grp_info;
2026
2027                 grp_info = &bp->grp_info[grp_idx];
2028                 if (grp_info->agg_fw_ring_id == ring_id)
2029                         return grp_idx;
2030         }
2031         return INVALID_HW_RING_ID;
2032 }
2033
2034 #define BNXT_GET_EVENT_PORT(data)       \
2035         ((data) &                       \
2036          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2037
2038 #define BNXT_EVENT_RING_TYPE(data2)     \
2039         ((data2) &                      \
2040          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2041
2042 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2043         (BNXT_EVENT_RING_TYPE(data2) == \
2044          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2045
2046 static int bnxt_async_event_process(struct bnxt *bp,
2047                                     struct hwrm_async_event_cmpl *cmpl)
2048 {
2049         u16 event_id = le16_to_cpu(cmpl->event_id);
2050         u32 data1 = le32_to_cpu(cmpl->event_data1);
2051         u32 data2 = le32_to_cpu(cmpl->event_data2);
2052
2053         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2054         switch (event_id) {
2055         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2056                 struct bnxt_link_info *link_info = &bp->link_info;
2057
2058                 if (BNXT_VF(bp))
2059                         goto async_event_process_exit;
2060
2061                 /* print unsupported speed warning in forced speed mode only */
2062                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2063                     (data1 & 0x20000)) {
2064                         u16 fw_speed = link_info->force_link_speed;
2065                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2066
2067                         if (speed != SPEED_UNKNOWN)
2068                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2069                                             speed);
2070                 }
2071                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2072         }
2073                 fallthrough;
2074         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2075         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2076                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2077                 fallthrough;
2078         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2079                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2080                 break;
2081         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2082                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2083                 break;
2084         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2085                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2086
2087                 if (BNXT_VF(bp))
2088                         break;
2089
2090                 if (bp->pf.port_id != port_id)
2091                         break;
2092
2093                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2094                 break;
2095         }
2096         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2097                 if (BNXT_PF(bp))
2098                         goto async_event_process_exit;
2099                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2100                 break;
2101         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2102                 char *fatal_str = "non-fatal";
2103
2104                 if (!bp->fw_health)
2105                         goto async_event_process_exit;
2106
2107                 bp->fw_reset_timestamp = jiffies;
2108                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2109                 if (!bp->fw_reset_min_dsecs)
2110                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2111                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2112                 if (!bp->fw_reset_max_dsecs)
2113                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2114                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2115                         fatal_str = "fatal";
2116                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2117                 }
2118                 netif_warn(bp, hw, bp->dev,
2119                            "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2120                            fatal_str, data1, data2,
2121                            bp->fw_reset_min_dsecs * 100,
2122                            bp->fw_reset_max_dsecs * 100);
2123                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2124                 break;
2125         }
2126         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2127                 struct bnxt_fw_health *fw_health = bp->fw_health;
2128
2129                 if (!fw_health)
2130                         goto async_event_process_exit;
2131
2132                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2133                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2134                 if (!fw_health->enabled) {
2135                         netif_info(bp, drv, bp->dev,
2136                                    "Error recovery info: error recovery[0]\n");
2137                         break;
2138                 }
2139                 fw_health->tmr_multiplier =
2140                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2141                                      bp->current_interval * 10);
2142                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2143                 fw_health->last_fw_heartbeat =
2144                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2145                 fw_health->last_fw_reset_cnt =
2146                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2147                 netif_info(bp, drv, bp->dev,
2148                            "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2149                            fw_health->master, fw_health->last_fw_reset_cnt,
2150                            bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2151                 goto async_event_process_exit;
2152         }
2153         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2154                 netif_notice(bp, hw, bp->dev,
2155                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2156                              data1, data2);
2157                 goto async_event_process_exit;
2158         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2159                 struct bnxt_rx_ring_info *rxr;
2160                 u16 grp_idx;
2161
2162                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2163                         goto async_event_process_exit;
2164
2165                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2166                             BNXT_EVENT_RING_TYPE(data2), data1);
2167                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2168                         goto async_event_process_exit;
2169
2170                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2171                 if (grp_idx == INVALID_HW_RING_ID) {
2172                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2173                                     data1);
2174                         goto async_event_process_exit;
2175                 }
2176                 rxr = bp->bnapi[grp_idx]->rx_ring;
2177                 bnxt_sched_reset(bp, rxr);
2178                 goto async_event_process_exit;
2179         }
2180         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2181                 struct bnxt_fw_health *fw_health = bp->fw_health;
2182
2183                 netif_notice(bp, hw, bp->dev,
2184                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2185                              data1, data2);
2186                 if (fw_health) {
2187                         fw_health->echo_req_data1 = data1;
2188                         fw_health->echo_req_data2 = data2;
2189                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2190                         break;
2191                 }
2192                 goto async_event_process_exit;
2193         }
2194         default:
2195                 goto async_event_process_exit;
2196         }
2197         bnxt_queue_sp_work(bp);
2198 async_event_process_exit:
2199         bnxt_ulp_async_events(bp, cmpl);
2200         return 0;
2201 }
2202
2203 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2204 {
2205         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2206         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2207         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2208                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2209
2210         switch (cmpl_type) {
2211         case CMPL_BASE_TYPE_HWRM_DONE:
2212                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2213                 if (seq_id == bp->hwrm_intr_seq_id)
2214                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2215                 else
2216                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2217                 break;
2218
2219         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2220                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2221
2222                 if ((vf_id < bp->pf.first_vf_id) ||
2223                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2224                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2225                                    vf_id);
2226                         return -EINVAL;
2227                 }
2228
2229                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2230                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2231                 bnxt_queue_sp_work(bp);
2232                 break;
2233
2234         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2235                 bnxt_async_event_process(bp,
2236                                          (struct hwrm_async_event_cmpl *)txcmp);
2237                 break;
2238
2239         default:
2240                 break;
2241         }
2242
2243         return 0;
2244 }
2245
2246 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2247 {
2248         struct bnxt_napi *bnapi = dev_instance;
2249         struct bnxt *bp = bnapi->bp;
2250         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2251         u32 cons = RING_CMP(cpr->cp_raw_cons);
2252
2253         cpr->event_ctr++;
2254         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2255         napi_schedule(&bnapi->napi);
2256         return IRQ_HANDLED;
2257 }
2258
2259 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2260 {
2261         u32 raw_cons = cpr->cp_raw_cons;
2262         u16 cons = RING_CMP(raw_cons);
2263         struct tx_cmp *txcmp;
2264
2265         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2266
2267         return TX_CMP_VALID(txcmp, raw_cons);
2268 }
2269
2270 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2271 {
2272         struct bnxt_napi *bnapi = dev_instance;
2273         struct bnxt *bp = bnapi->bp;
2274         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2275         u32 cons = RING_CMP(cpr->cp_raw_cons);
2276         u32 int_status;
2277
2278         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2279
2280         if (!bnxt_has_work(bp, cpr)) {
2281                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2282                 /* return if erroneous interrupt */
2283                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2284                         return IRQ_NONE;
2285         }
2286
2287         /* disable ring IRQ */
2288         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2289
2290         /* Return here if interrupt is shared and is disabled. */
2291         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2292                 return IRQ_HANDLED;
2293
2294         napi_schedule(&bnapi->napi);
2295         return IRQ_HANDLED;
2296 }
2297
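     /* Core poll loop shared by all chip variants: walk the completion ring,
      * counting TX completions and dispatching RX and HWRM completions, until
      * the ring is empty or the RX budget is exhausted.  has_more_work is set
      * when the loop stops early so that the caller knows to poll again.
      */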
2298 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2299                             int budget)
2300 {
2301         struct bnxt_napi *bnapi = cpr->bnapi;
2302         u32 raw_cons = cpr->cp_raw_cons;
2303         u32 cons;
2304         int tx_pkts = 0;
2305         int rx_pkts = 0;
2306         u8 event = 0;
2307         struct tx_cmp *txcmp;
2308
2309         cpr->has_more_work = 0;
2310         cpr->had_work_done = 1;
2311         while (1) {
2312                 int rc;
2313
2314                 cons = RING_CMP(raw_cons);
2315                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2316
2317                 if (!TX_CMP_VALID(txcmp, raw_cons))
2318                         break;
2319
2320                 /* The validity test of the entry must be done first before
2321                  * reading any further.
2322                  */
2323                 dma_rmb();
2324                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2325                         tx_pkts++;
2326                         /* return full budget so NAPI will complete. */
2327                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2328                                 rx_pkts = budget;
2329                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2330                                 if (budget)
2331                                         cpr->has_more_work = 1;
2332                                 break;
2333                         }
2334                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2335                         if (likely(budget))
2336                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2337                         else
2338                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2339                                                            &event);
2340                         if (likely(rc >= 0))
2341                                 rx_pkts += rc;
2342                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2343                          * the NAPI budget.  Otherwise, we may potentially loop
2344                          * here forever if we consistently cannot allocate
2345                          * buffers.
2346                          */
2347                         else if (rc == -ENOMEM && budget)
2348                                 rx_pkts++;
2349                         else if (rc == -EBUSY)  /* partial completion */
2350                                 break;
2351                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2352                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2353                                     (TX_CMP_TYPE(txcmp) ==
2354                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2355                                     (TX_CMP_TYPE(txcmp) ==
2356                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2357                         bnxt_hwrm_handler(bp, txcmp);
2358                 }
2359                 raw_cons = NEXT_RAW_CMP(raw_cons);
2360
2361                 if (rx_pkts && rx_pkts == budget) {
2362                         cpr->has_more_work = 1;
2363                         break;
2364                 }
2365         }
2366
2367         if (event & BNXT_REDIRECT_EVENT)
2368                 xdp_do_flush_map();
2369
2370         if (event & BNXT_TX_EVENT) {
2371                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2372                 u16 prod = txr->tx_prod;
2373
2374                 /* Sync BD data before updating doorbell */
2375                 wmb();
2376
2377                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2378         }
2379
2380         cpr->cp_raw_cons = raw_cons;
2381         bnapi->tx_pkts += tx_pkts;
2382         bnapi->events |= event;
2383         return rx_pkts;
2384 }
2385
2386 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2387 {
2388         if (bnapi->tx_pkts) {
2389                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2390                 bnapi->tx_pkts = 0;
2391         }
2392
2393         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2394                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2395
2396                 if (bnapi->events & BNXT_AGG_EVENT)
2397                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2398                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2399         }
2400         bnapi->events = 0;
2401 }
2402
2403 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2404                           int budget)
2405 {
2406         struct bnxt_napi *bnapi = cpr->bnapi;
2407         int rx_pkts;
2408
2409         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2410
2411         /* ACK completion ring before freeing tx ring and producing new
2412          * buffers in rx/agg rings to prevent overflowing the completion
2413          * ring.
2414          */
2415         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2416
2417         __bnxt_poll_work_done(bp, bnapi);
2418         return rx_pkts;
2419 }
2420
2421 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2422 {
2423         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2424         struct bnxt *bp = bnapi->bp;
2425         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2426         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2427         struct tx_cmp *txcmp;
2428         struct rx_cmp_ext *rxcmp1;
2429         u32 cp_cons, tmp_raw_cons;
2430         u32 raw_cons = cpr->cp_raw_cons;
2431         u32 rx_pkts = 0;
2432         u8 event = 0;
2433
2434         while (1) {
2435                 int rc;
2436
2437                 cp_cons = RING_CMP(raw_cons);
2438                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2439
2440                 if (!TX_CMP_VALID(txcmp, raw_cons))
2441                         break;
2442
2443                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2444                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2445                         cp_cons = RING_CMP(tmp_raw_cons);
2446                         rxcmp1 = (struct rx_cmp_ext *)
2447                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2448
2449                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2450                                 break;
2451
2452                         /* force an error to recycle the buffer */
2453                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2454                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2455
2456                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2457                         if (likely(rc == -EIO) && budget)
2458                                 rx_pkts++;
2459                         else if (rc == -EBUSY)  /* partial completion */
2460                                 break;
2461                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2462                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2463                         bnxt_hwrm_handler(bp, txcmp);
2464                 } else {
2465                         netdev_err(bp->dev,
2466                                    "Invalid completion received on special ring\n");
2467                 }
2468                 raw_cons = NEXT_RAW_CMP(raw_cons);
2469
2470                 if (rx_pkts == budget)
2471                         break;
2472         }
2473
2474         cpr->cp_raw_cons = raw_cons;
2475         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2476         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2477
2478         if (event & BNXT_AGG_EVENT)
2479                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2480
2481         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2482                 napi_complete_done(napi, rx_pkts);
2483                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2484         }
2485         return rx_pkts;
2486 }
2487
2488 static int bnxt_poll(struct napi_struct *napi, int budget)
2489 {
2490         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2491         struct bnxt *bp = bnapi->bp;
2492         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2493         int work_done = 0;
2494
2495         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2496                 napi_complete(napi);
2497                 return 0;
2498         }
2499         while (1) {
2500                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2501
2502                 if (work_done >= budget) {
2503                         if (!budget)
2504                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2505                         break;
2506                 }
2507
2508                 if (!bnxt_has_work(bp, cpr)) {
2509                         if (napi_complete_done(napi, work_done))
2510                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2511                         break;
2512                 }
2513         }
2514         if (bp->flags & BNXT_FLAG_DIM) {
2515                 struct dim_sample dim_sample = {};
2516
2517                 dim_update_sample(cpr->event_ctr,
2518                                   cpr->rx_packets,
2519                                   cpr->rx_bytes,
2520                                   &dim_sample);
2521                 net_dim(&cpr->dim, dim_sample);
2522         }
2523         return work_done;
2524 }
2525
2526 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2527 {
2528         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2529         int i, work_done = 0;
2530
2531         for (i = 0; i < 2; i++) {
2532                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2533
2534                 if (cpr2) {
2535                         work_done += __bnxt_poll_work(bp, cpr2,
2536                                                       budget - work_done);
2537                         cpr->has_more_work |= cpr2->has_more_work;
2538                 }
2539         }
2540         return work_done;
2541 }
2542
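     /* Write the CQ doorbells for any child completion rings that did work,
      * with dbr_type selecting a plain acknowledge (DBR_TYPE_CQ) or an
      * acknowledge that also re-arms the ring (DBR_TYPE_CQ_ARMALL).
      */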
2543 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2544                                  u64 dbr_type)
2545 {
2546         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2547         int i;
2548
2549         for (i = 0; i < 2; i++) {
2550                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2551                 struct bnxt_db_info *db;
2552
2553                 if (cpr2 && cpr2->had_work_done) {
2554                         db = &cpr2->cp_db;
2555                         writeq(db->db_key64 | dbr_type |
2556                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2557                         cpr2->had_work_done = 0;
2558                 }
2559         }
2560         __bnxt_poll_work_done(bp, bnapi);
2561 }
2562
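     /* NAPI poll for P5 chips: drain the notification queue, polling the
      * child completion ring named by each CQ_NOTIFICATION entry, then
      * re-arm through the NQ doorbell once all CQs are fully serviced.
      */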
2563 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2564 {
2565         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2566         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2567         u32 raw_cons = cpr->cp_raw_cons;
2568         struct bnxt *bp = bnapi->bp;
2569         struct nqe_cn *nqcmp;
2570         int work_done = 0;
2571         u32 cons;
2572
2573         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2574                 napi_complete(napi);
2575                 return 0;
2576         }
2577         if (cpr->has_more_work) {
2578                 cpr->has_more_work = 0;
2579                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2580         }
2581         while (1) {
2582                 cons = RING_CMP(raw_cons);
2583                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2584
2585                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2586                         if (cpr->has_more_work)
2587                                 break;
2588
2589                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2590                         cpr->cp_raw_cons = raw_cons;
2591                         if (napi_complete_done(napi, work_done))
2592                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2593                                                   cpr->cp_raw_cons);
2594                         return work_done;
2595                 }
2596
2597                 /* The validity test of the entry must be done first before
2598                  * reading any further.
2599                  */
2600                 dma_rmb();
2601
2602                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2603                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2604                         struct bnxt_cp_ring_info *cpr2;
2605
2606                         cpr2 = cpr->cp_ring_arr[idx];
2607                         work_done += __bnxt_poll_work(bp, cpr2,
2608                                                       budget - work_done);
2609                         cpr->has_more_work |= cpr2->has_more_work;
2610                 } else {
2611                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2612                 }
2613                 raw_cons = NEXT_RAW_CMP(raw_cons);
2614         }
2615         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2616         if (raw_cons != cpr->cp_raw_cons) {
2617                 cpr->cp_raw_cons = raw_cons;
2618                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2619         }
2620         return work_done;
2621 }
2622
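     /* Free every skb or XDP frame still attached to the TX rings, DMA
      * unmapping the head and all fragments, and reset each TX queue's
      * byte queue counters.
      */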
2623 static void bnxt_free_tx_skbs(struct bnxt *bp)
2624 {
2625         int i, max_idx;
2626         struct pci_dev *pdev = bp->pdev;
2627
2628         if (!bp->tx_ring)
2629                 return;
2630
2631         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2632         for (i = 0; i < bp->tx_nr_rings; i++) {
2633                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2634                 int j;
2635
2636                 for (j = 0; j < max_idx;) {
2637                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2638                         struct sk_buff *skb;
2639                         int k, last;
2640
2641                         if (i < bp->tx_nr_rings_xdp &&
2642                             tx_buf->action == XDP_REDIRECT) {
2643                                 dma_unmap_single(&pdev->dev,
2644                                         dma_unmap_addr(tx_buf, mapping),
2645                                         dma_unmap_len(tx_buf, len),
2646                                         PCI_DMA_TODEVICE);
2647                                 xdp_return_frame(tx_buf->xdpf);
2648                                 tx_buf->action = 0;
2649                                 tx_buf->xdpf = NULL;
2650                                 j++;
2651                                 continue;
2652                         }
2653
2654                         skb = tx_buf->skb;
2655                         if (!skb) {
2656                                 j++;
2657                                 continue;
2658                         }
2659
2660                         tx_buf->skb = NULL;
2661
2662                         if (tx_buf->is_push) {
2663                                 dev_kfree_skb(skb);
2664                                 j += 2;
2665                                 continue;
2666                         }
2667
2668                         dma_unmap_single(&pdev->dev,
2669                                          dma_unmap_addr(tx_buf, mapping),
2670                                          skb_headlen(skb),
2671                                          DMA_TO_DEVICE);
2672
2673                         last = tx_buf->nr_frags;
2674                         j += 2;
2675                         for (k = 0; k < last; k++, j++) {
2676                                 int ring_idx = j & bp->tx_ring_mask;
2677                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2678
2679                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2680                                 dma_unmap_page(
2681                                         &pdev->dev,
2682                                         dma_unmap_addr(tx_buf, mapping),
2683                                         skb_frag_size(frag), DMA_TO_DEVICE);
2684                         }
2685                         dev_kfree_skb(skb);
2686                 }
2687                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2688         }
2689 }
2690
2691 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2692 {
2693         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2694         struct pci_dev *pdev = bp->pdev;
2695         struct bnxt_tpa_idx_map *map;
2696         int i, max_idx, max_agg_idx;
2697
2698         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2699         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2700         if (!rxr->rx_tpa)
2701                 goto skip_rx_tpa_free;
2702
2703         for (i = 0; i < bp->max_tpa; i++) {
2704                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2705                 u8 *data = tpa_info->data;
2706
2707                 if (!data)
2708                         continue;
2709
2710                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2711                                        bp->rx_buf_use_size, bp->rx_dir,
2712                                        DMA_ATTR_WEAK_ORDERING);
2713
2714                 tpa_info->data = NULL;
2715
2716                 kfree(data);
2717         }
2718
2719 skip_rx_tpa_free:
2720         for (i = 0; i < max_idx; i++) {
2721                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2722                 dma_addr_t mapping = rx_buf->mapping;
2723                 void *data = rx_buf->data;
2724
2725                 if (!data)
2726                         continue;
2727
2728                 rx_buf->data = NULL;
2729                 if (BNXT_RX_PAGE_MODE(bp)) {
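                             /* The stored mapping points at the data offset;
                              * rewind to the start of the page before
                              * unmapping.
                              */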
2730                         mapping -= bp->rx_dma_offset;
2731                         dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2732                                              bp->rx_dir,
2733                                              DMA_ATTR_WEAK_ORDERING);
2734                         page_pool_recycle_direct(rxr->page_pool, data);
2735                 } else {
2736                         dma_unmap_single_attrs(&pdev->dev, mapping,
2737                                                bp->rx_buf_use_size, bp->rx_dir,
2738                                                DMA_ATTR_WEAK_ORDERING);
2739                         kfree(data);
2740                 }
2741         }
2742         for (i = 0; i < max_agg_idx; i++) {
2743                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2744                 struct page *page = rx_agg_buf->page;
2745
2746                 if (!page)
2747                         continue;
2748
2749                 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2750                                      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2751                                      DMA_ATTR_WEAK_ORDERING);
2752
2753                 rx_agg_buf->page = NULL;
2754                 __clear_bit(i, rxr->rx_agg_bmap);
2755
2756                 __free_page(page);
2757         }
2758         if (rxr->rx_page) {
2759                 __free_page(rxr->rx_page);
2760                 rxr->rx_page = NULL;
2761         }
2762         map = rxr->rx_tpa_idx_map;
2763         if (map)
2764                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2765 }
2766
2767 static void bnxt_free_rx_skbs(struct bnxt *bp)
2768 {
2769         int i;
2770
2771         if (!bp->rx_ring)
2772                 return;
2773
2774         for (i = 0; i < bp->rx_nr_rings; i++)
2775                 bnxt_free_one_rx_ring_skbs(bp, i);
2776 }
2777
2778 static void bnxt_free_skbs(struct bnxt *bp)
2779 {
2780         bnxt_free_tx_skbs(bp);
2781         bnxt_free_rx_skbs(bp);
2782 }
2783
2784 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2785 {
2786         u8 init_val = mem_init->init_val;
2787         u16 offset = mem_init->offset;
2788         u8 *p2 = p;
2789         int i;
2790
2791         if (!init_val)
2792                 return;
2793         if (offset == BNXT_MEM_INVALID_OFFSET) {
2794                 memset(p, init_val, len);
2795                 return;
2796         }
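             /* Otherwise initialize only the byte at 'offset' within each
              * mem_init->size sized unit.
              */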
2797         for (i = 0; i < len; i += mem_init->size)
2798                 *(p2 + i + offset) = init_val;
2799 }
2800
2801 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2802 {
2803         struct pci_dev *pdev = bp->pdev;
2804         int i;
2805
2806         for (i = 0; i < rmem->nr_pages; i++) {
2807                 if (!rmem->pg_arr[i])
2808                         continue;
2809
2810                 dma_free_coherent(&pdev->dev, rmem->page_size,
2811                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2812
2813                 rmem->pg_arr[i] = NULL;
2814         }
2815         if (rmem->pg_tbl) {
2816                 size_t pg_tbl_size = rmem->nr_pages * 8;
2817
2818                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2819                         pg_tbl_size = rmem->page_size;
2820                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2821                                   rmem->pg_tbl, rmem->pg_tbl_map);
2822                 rmem->pg_tbl = NULL;
2823         }
2824         if (rmem->vmem_size && *rmem->vmem) {
2825                 vfree(*rmem->vmem);
2826                 *rmem->vmem = NULL;
2827         }
2828 }
2829
2830 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2831 {
2832         struct pci_dev *pdev = bp->pdev;
2833         u64 valid_bit = 0;
2834         int i;
2835
2836         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2837                 valid_bit = PTU_PTE_VALID;
2838         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2839                 size_t pg_tbl_size = rmem->nr_pages * 8;
2840
2841                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2842                         pg_tbl_size = rmem->page_size;
2843                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2844                                                   &rmem->pg_tbl_map,
2845                                                   GFP_KERNEL);
2846                 if (!rmem->pg_tbl)
2847                         return -ENOMEM;
2848         }
2849
2850         for (i = 0; i < rmem->nr_pages; i++) {
2851                 u64 extra_bits = valid_bit;
2852
2853                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2854                                                      rmem->page_size,
2855                                                      &rmem->dma_arr[i],
2856                                                      GFP_KERNEL);
2857                 if (!rmem->pg_arr[i])
2858                         return -ENOMEM;
2859
2860                 if (rmem->mem_init)
2861                         bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2862                                           rmem->page_size);
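                     /* For multi-page or multi-level rings, record each page
                      * in the page table; ring PTE tables additionally mark
                      * the next-to-last and last entries for the hardware.
                      */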
2863                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2864                         if (i == rmem->nr_pages - 2 &&
2865                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2866                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2867                         else if (i == rmem->nr_pages - 1 &&
2868                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2869                                 extra_bits |= PTU_PTE_LAST;
2870                         rmem->pg_tbl[i] =
2871                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2872                 }
2873         }
2874
2875         if (rmem->vmem_size) {
2876                 *rmem->vmem = vzalloc(rmem->vmem_size);
2877                 if (!(*rmem->vmem))
2878                         return -ENOMEM;
2879         }
2880         return 0;
2881 }
2882
2883 static void bnxt_free_tpa_info(struct bnxt *bp)
2884 {
2885         int i;
2886
2887         for (i = 0; i < bp->rx_nr_rings; i++) {
2888                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2889
2890                 kfree(rxr->rx_tpa_idx_map);
2891                 rxr->rx_tpa_idx_map = NULL;
2892                 if (rxr->rx_tpa) {
2893                         kfree(rxr->rx_tpa[0].agg_arr);
2894                         rxr->rx_tpa[0].agg_arr = NULL;
2895                 }
2896                 kfree(rxr->rx_tpa);
2897                 rxr->rx_tpa = NULL;
2898         }
2899 }
2900
2901 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2902 {
2903         int i, j, total_aggs = 0;
2904
2905         bp->max_tpa = MAX_TPA;
2906         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2907                 if (!bp->max_tpa_v2)
2908                         return 0;
2909                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2910                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2911         }
2912
2913         for (i = 0; i < bp->rx_nr_rings; i++) {
2914                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2915                 struct rx_agg_cmp *agg;
2916
2917                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2918                                       GFP_KERNEL);
2919                 if (!rxr->rx_tpa)
2920                         return -ENOMEM;
2921
2922                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2923                         continue;
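                     /* P5 chips use one aggregation completion array per RX
                      * ring, carved into MAX_SKB_FRAGS sized slices, one
                      * slice per TPA slot.
                      */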
2924                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2925                 rxr->rx_tpa[0].agg_arr = agg;
2926                 if (!agg)
2927                         return -ENOMEM;
2928                 for (j = 1; j < bp->max_tpa; j++)
2929                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2930                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2931                                               GFP_KERNEL);
2932                 if (!rxr->rx_tpa_idx_map)
2933                         return -ENOMEM;
2934         }
2935         return 0;
2936 }
2937
2938 static void bnxt_free_rx_rings(struct bnxt *bp)
2939 {
2940         int i;
2941
2942         if (!bp->rx_ring)
2943                 return;
2944
2945         bnxt_free_tpa_info(bp);
2946         for (i = 0; i < bp->rx_nr_rings; i++) {
2947                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2948                 struct bnxt_ring_struct *ring;
2949
2950                 if (rxr->xdp_prog)
2951                         bpf_prog_put(rxr->xdp_prog);
2952
2953                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2954                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2955
2956                 page_pool_destroy(rxr->page_pool);
2957                 rxr->page_pool = NULL;
2958
2959                 kfree(rxr->rx_agg_bmap);
2960                 rxr->rx_agg_bmap = NULL;
2961
2962                 ring = &rxr->rx_ring_struct;
2963                 bnxt_free_ring(bp, &ring->ring_mem);
2964
2965                 ring = &rxr->rx_agg_ring_struct;
2966                 bnxt_free_ring(bp, &ring->ring_mem);
2967         }
2968 }
2969
2970 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2971                                    struct bnxt_rx_ring_info *rxr)
2972 {
2973         struct page_pool_params pp = { 0 };
2974
2975         pp.pool_size = bp->rx_ring_size;
2976         pp.nid = dev_to_node(&bp->pdev->dev);
2977         pp.dev = &bp->pdev->dev;
2978         pp.dma_dir = DMA_BIDIRECTIONAL;
2979
2980         rxr->page_pool = page_pool_create(&pp);
2981         if (IS_ERR(rxr->page_pool)) {
2982                 int err = PTR_ERR(rxr->page_pool);
2983
2984                 rxr->page_pool = NULL;
2985                 return err;
2986         }
2987         return 0;
2988 }
2989
2990 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2991 {
2992         int i, rc = 0, agg_rings = 0;
2993
2994         if (!bp->rx_ring)
2995                 return -ENOMEM;
2996
2997         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2998                 agg_rings = 1;
2999
3000         for (i = 0; i < bp->rx_nr_rings; i++) {
3001                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3002                 struct bnxt_ring_struct *ring;
3003
3004                 ring = &rxr->rx_ring_struct;
3005
3006                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3007                 if (rc)
3008                         return rc;
3009
3010                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3011                 if (rc < 0)
3012                         return rc;
3013
3014                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3015                                                 MEM_TYPE_PAGE_POOL,
3016                                                 rxr->page_pool);
3017                 if (rc) {
3018                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3019                         return rc;
3020                 }
3021
3022                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3023                 if (rc)
3024                         return rc;
3025
3026                 ring->grp_idx = i;
3027                 if (agg_rings) {
3028                         u16 mem_size;
3029
3030                         ring = &rxr->rx_agg_ring_struct;
3031                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3032                         if (rc)
3033                                 return rc;
3034
3035                         ring->grp_idx = i;
3036                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3037                         mem_size = rxr->rx_agg_bmap_size / 8;
3038                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3039                         if (!rxr->rx_agg_bmap)
3040                                 return -ENOMEM;
3041                 }
3042         }
3043         if (bp->flags & BNXT_FLAG_TPA)
3044                 rc = bnxt_alloc_tpa_info(bp);
3045         return rc;
3046 }
3047
3048 static void bnxt_free_tx_rings(struct bnxt *bp)
3049 {
3050         int i;
3051         struct pci_dev *pdev = bp->pdev;
3052
3053         if (!bp->tx_ring)
3054                 return;
3055
3056         for (i = 0; i < bp->tx_nr_rings; i++) {
3057                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3058                 struct bnxt_ring_struct *ring;
3059
3060                 if (txr->tx_push) {
3061                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3062                                           txr->tx_push, txr->tx_push_mapping);
3063                         txr->tx_push = NULL;
3064                 }
3065
3066                 ring = &txr->tx_ring_struct;
3067
3068                 bnxt_free_ring(bp, &ring->ring_mem);
3069         }
3070 }
3071
3072 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3073 {
3074         int i, j, rc;
3075         struct pci_dev *pdev = bp->pdev;
3076
3077         bp->tx_push_size = 0;
3078         if (bp->tx_push_thresh) {
3079                 int push_size;
3080
3081                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3082                                         bp->tx_push_thresh);
3083
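                     /* TX push copies the BDs and payload inline, so it is
                      * only used when the whole unit fits in 256 bytes.
                      */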
3084                 if (push_size > 256) {
3085                         push_size = 0;
3086                         bp->tx_push_thresh = 0;
3087                 }
3088
3089                 bp->tx_push_size = push_size;
3090         }
3091
3092         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3093                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3094                 struct bnxt_ring_struct *ring;
3095                 u8 qidx;
3096
3097                 ring = &txr->tx_ring_struct;
3098
3099                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3100                 if (rc)
3101                         return rc;
3102
3103                 ring->grp_idx = txr->bnapi->index;
3104                 if (bp->tx_push_size) {
3105                         dma_addr_t mapping;
3106
3107                         /* One pre-allocated DMA buffer to back up
3108                          * TX push operation
3109                          */
3110                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3111                                                 bp->tx_push_size,
3112                                                 &txr->tx_push_mapping,
3113                                                 GFP_KERNEL);
3114
3115                         if (!txr->tx_push)
3116                                 return -ENOMEM;
3117
3118                         mapping = txr->tx_push_mapping +
3119                                 sizeof(struct tx_push_bd);
3120                         txr->data_mapping = cpu_to_le64(mapping);
3121                 }
3122                 qidx = bp->tc_to_qidx[j];
3123                 ring->queue_id = bp->q_info[qidx].queue_id;
3124                 if (i < bp->tx_nr_rings_xdp)
3125                         continue;
3126                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3127                         j++;
3128         }
3129         return 0;
3130 }
3131
3132 static void bnxt_free_cp_rings(struct bnxt *bp)
3133 {
3134         int i;
3135
3136         if (!bp->bnapi)
3137                 return;
3138
3139         for (i = 0; i < bp->cp_nr_rings; i++) {
3140                 struct bnxt_napi *bnapi = bp->bnapi[i];
3141                 struct bnxt_cp_ring_info *cpr;
3142                 struct bnxt_ring_struct *ring;
3143                 int j;
3144
3145                 if (!bnapi)
3146                         continue;
3147
3148                 cpr = &bnapi->cp_ring;
3149                 ring = &cpr->cp_ring_struct;
3150
3151                 bnxt_free_ring(bp, &ring->ring_mem);
3152
3153                 for (j = 0; j < 2; j++) {
3154                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3155
3156                         if (cpr2) {
3157                                 ring = &cpr2->cp_ring_struct;
3158                                 bnxt_free_ring(bp, &ring->ring_mem);
3159                                 kfree(cpr2);
3160                                 cpr->cp_ring_arr[j] = NULL;
3161                         }
3162                 }
3163         }
3164 }
3165
3166 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3167 {
3168         struct bnxt_ring_mem_info *rmem;
3169         struct bnxt_ring_struct *ring;
3170         struct bnxt_cp_ring_info *cpr;
3171         int rc;
3172
3173         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3174         if (!cpr)
3175                 return NULL;
3176
3177         ring = &cpr->cp_ring_struct;
3178         rmem = &ring->ring_mem;
3179         rmem->nr_pages = bp->cp_nr_pages;
3180         rmem->page_size = HW_CMPD_RING_SIZE;
3181         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3182         rmem->dma_arr = cpr->cp_desc_mapping;
3183         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3184         rc = bnxt_alloc_ring(bp, rmem);
3185         if (rc) {
3186                 bnxt_free_ring(bp, rmem);
3187                 kfree(cpr);
3188                 cpr = NULL;
3189         }
3190         return cpr;
3191 }
3192
3193 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3194 {
3195         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3196         int i, rc, ulp_base_vec, ulp_msix;
3197
3198         ulp_msix = bnxt_get_ulp_msix_num(bp);
3199         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3200         for (i = 0; i < bp->cp_nr_rings; i++) {
3201                 struct bnxt_napi *bnapi = bp->bnapi[i];
3202                 struct bnxt_cp_ring_info *cpr;
3203                 struct bnxt_ring_struct *ring;
3204
3205                 if (!bnapi)
3206                         continue;
3207
3208                 cpr = &bnapi->cp_ring;
3209                 cpr->bnapi = bnapi;
3210                 ring = &cpr->cp_ring_struct;
3211
3212                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3213                 if (rc)
3214                         return rc;
3215
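                     /* MSI-X vectors starting at ulp_base_vec are reserved
                      * for the ULP (RDMA) driver, so rings at or above that
                      * index map past them.
                      */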
3216                 if (ulp_msix && i >= ulp_base_vec)
3217                         ring->map_idx = i + ulp_msix;
3218                 else
3219                         ring->map_idx = i;
3220
3221                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3222                         continue;
3223
3224                 if (i < bp->rx_nr_rings) {
3225                         struct bnxt_cp_ring_info *cpr2 =
3226                                 bnxt_alloc_cp_sub_ring(bp);
3227
3228                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3229                         if (!cpr2)
3230                                 return -ENOMEM;
3231                         cpr2->bnapi = bnapi;
3232                 }
3233                 if ((sh && i < bp->tx_nr_rings) ||
3234                     (!sh && i >= bp->rx_nr_rings)) {
3235                         struct bnxt_cp_ring_info *cpr2 =
3236                                 bnxt_alloc_cp_sub_ring(bp);
3237
3238                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3239                         if (!cpr2)
3240                                 return -ENOMEM;
3241                         cpr2->bnapi = bnapi;
3242                 }
3243         }
3244         return 0;
3245 }
3246
3247 static void bnxt_init_ring_struct(struct bnxt *bp)
3248 {
3249         int i;
3250
3251         for (i = 0; i < bp->cp_nr_rings; i++) {
3252                 struct bnxt_napi *bnapi = bp->bnapi[i];
3253                 struct bnxt_ring_mem_info *rmem;
3254                 struct bnxt_cp_ring_info *cpr;
3255                 struct bnxt_rx_ring_info *rxr;
3256                 struct bnxt_tx_ring_info *txr;
3257                 struct bnxt_ring_struct *ring;
3258
3259                 if (!bnapi)
3260                         continue;
3261
3262                 cpr = &bnapi->cp_ring;
3263                 ring = &cpr->cp_ring_struct;
3264                 rmem = &ring->ring_mem;
3265                 rmem->nr_pages = bp->cp_nr_pages;
3266                 rmem->page_size = HW_CMPD_RING_SIZE;
3267                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3268                 rmem->dma_arr = cpr->cp_desc_mapping;
3269                 rmem->vmem_size = 0;
3270
3271                 rxr = bnapi->rx_ring;
3272                 if (!rxr)
3273                         goto skip_rx;
3274
3275                 ring = &rxr->rx_ring_struct;
3276                 rmem = &ring->ring_mem;
3277                 rmem->nr_pages = bp->rx_nr_pages;
3278                 rmem->page_size = HW_RXBD_RING_SIZE;
3279                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3280                 rmem->dma_arr = rxr->rx_desc_mapping;
3281                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3282                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3283
3284                 ring = &rxr->rx_agg_ring_struct;
3285                 rmem = &ring->ring_mem;
3286                 rmem->nr_pages = bp->rx_agg_nr_pages;
3287                 rmem->page_size = HW_RXBD_RING_SIZE;
3288                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3289                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3290                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3291                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3292
3293 skip_rx:
3294                 txr = bnapi->tx_ring;
3295                 if (!txr)
3296                         continue;
3297
3298                 ring = &txr->tx_ring_struct;
3299                 rmem = &ring->ring_mem;
3300                 rmem->nr_pages = bp->tx_nr_pages;
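                     /* TX BDs are the same size as RX BDs, so the RX BD
                      * ring page size is reused here.
                      */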
3301                 rmem->page_size = HW_RXBD_RING_SIZE;
3302                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3303                 rmem->dma_arr = txr->tx_desc_mapping;
3304                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3305                 rmem->vmem = (void **)&txr->tx_buf_ring;
3306         }
3307 }
3308
3309 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3310 {
3311         int i;
3312         u32 prod;
3313         struct rx_bd **rx_buf_ring;
3314
3315         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3316         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3317                 int j;
3318                 struct rx_bd *rxbd;
3319
3320                 rxbd = rx_buf_ring[i];
3321                 if (!rxbd)
3322                         continue;
3323
3324                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3325                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3326                         rxbd->rx_bd_opaque = prod;
3327                 }
3328         }
3329 }
3330
3331 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3332 {
3333         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3334         struct net_device *dev = bp->dev;
3335         u32 prod;
3336         int i;
3337
3338         prod = rxr->rx_prod;
3339         for (i = 0; i < bp->rx_ring_size; i++) {
3340                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3341                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3342                                     ring_nr, i, bp->rx_ring_size);
3343                         break;
3344                 }
3345                 prod = NEXT_RX(prod);
3346         }
3347         rxr->rx_prod = prod;
3348
3349         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3350                 return 0;
3351
3352         prod = rxr->rx_agg_prod;
3353         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3354                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3355                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3356                                     ring_nr, i, bp->rx_agg_ring_size);
3357                         break;
3358                 }
3359                 prod = NEXT_RX_AGG(prod);
3360         }
3361         rxr->rx_agg_prod = prod;
3362
3363         if (rxr->rx_tpa) {
3364                 dma_addr_t mapping;
3365                 u8 *data;
3366
3367                 for (i = 0; i < bp->max_tpa; i++) {
3368                         data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3369                         if (!data)
3370                                 return -ENOMEM;
3371
3372                         rxr->rx_tpa[i].data = data;
3373                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3374                         rxr->rx_tpa[i].mapping = mapping;
3375                 }
3376         }
3377         return 0;
3378 }
3379
3380 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3381 {
3382         struct bnxt_rx_ring_info *rxr;
3383         struct bnxt_ring_struct *ring;
3384         u32 type;
3385
3386         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3387                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3388
3389         if (NET_IP_ALIGN == 2)
3390                 type |= RX_BD_FLAGS_SOP;
3391
3392         rxr = &bp->rx_ring[ring_nr];
3393         ring = &rxr->rx_ring_struct;
3394         bnxt_init_rxbd_pages(ring, type);
3395
3396         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3397                 bpf_prog_add(bp->xdp_prog, 1);
3398                 rxr->xdp_prog = bp->xdp_prog;
3399         }
3400         ring->fw_ring_id = INVALID_HW_RING_ID;
3401
3402         ring = &rxr->rx_agg_ring_struct;
3403         ring->fw_ring_id = INVALID_HW_RING_ID;
3404
3405         if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3406                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3407                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3408
3409                 bnxt_init_rxbd_pages(ring, type);
3410         }
3411
3412         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3413 }
3414
3415 static void bnxt_init_cp_rings(struct bnxt *bp)
3416 {
3417         int i, j;
3418
3419         for (i = 0; i < bp->cp_nr_rings; i++) {
3420                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3421                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3422
3423                 ring->fw_ring_id = INVALID_HW_RING_ID;
3424                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3425                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3426                 for (j = 0; j < 2; j++) {
3427                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3428
3429                         if (!cpr2)
3430                                 continue;
3431
3432                         ring = &cpr2->cp_ring_struct;
3433                         ring->fw_ring_id = INVALID_HW_RING_ID;
3434                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3435                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3436                 }
3437         }
3438 }
3439
3440 static int bnxt_init_rx_rings(struct bnxt *bp)
3441 {
3442         int i, rc = 0;
3443
3444         if (BNXT_RX_PAGE_MODE(bp)) {
3445                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3446                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3447         } else {
3448                 bp->rx_offset = BNXT_RX_OFFSET;
3449                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3450         }
3451
3452         for (i = 0; i < bp->rx_nr_rings; i++) {
3453                 rc = bnxt_init_one_rx_ring(bp, i);
3454                 if (rc)
3455                         break;
3456         }
3457
3458         return rc;
3459 }
3460
3461 static int bnxt_init_tx_rings(struct bnxt *bp)
3462 {
3463         u16 i;
3464
3465         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3466                                    MAX_SKB_FRAGS + 1);
3467
3468         for (i = 0; i < bp->tx_nr_rings; i++) {
3469                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3470                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3471
3472                 ring->fw_ring_id = INVALID_HW_RING_ID;
3473         }
3474
3475         return 0;
3476 }
3477
3478 static void bnxt_free_ring_grps(struct bnxt *bp)
3479 {
3480         kfree(bp->grp_info);
3481         bp->grp_info = NULL;
3482 }
3483
3484 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3485 {
3486         int i;
3487
3488         if (irq_re_init) {
3489                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3490                                        sizeof(struct bnxt_ring_grp_info),
3491                                        GFP_KERNEL);
3492                 if (!bp->grp_info)
3493                         return -ENOMEM;
3494         }
3495         for (i = 0; i < bp->cp_nr_rings; i++) {
3496                 if (irq_re_init)
3497                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3498                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3499                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3500                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3501                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3502         }
3503         return 0;
3504 }
3505
3506 static void bnxt_free_vnics(struct bnxt *bp)
3507 {
3508         kfree(bp->vnic_info);
3509         bp->vnic_info = NULL;
3510         bp->nr_vnics = 0;
3511 }
3512
3513 static int bnxt_alloc_vnics(struct bnxt *bp)
3514 {
3515         int num_vnics = 1;
3516
3517 #ifdef CONFIG_RFS_ACCEL
3518         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3519                 num_vnics += bp->rx_nr_rings;
3520 #endif
3521
3522         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3523                 num_vnics++;
3524
3525         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3526                                 GFP_KERNEL);
3527         if (!bp->vnic_info)
3528                 return -ENOMEM;
3529
3530         bp->nr_vnics = num_vnics;
3531         return 0;
3532 }
3533
3534 static void bnxt_init_vnics(struct bnxt *bp)
3535 {
3536         int i;
3537
3538         for (i = 0; i < bp->nr_vnics; i++) {
3539                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3540                 int j;
3541
3542                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3543                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3544                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3545
3546                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3547
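                     /* VNIC 0 gets a random RSS hash key; the other VNICs
                      * reuse it so that hashing is consistent across VNICs.
                      */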
3548                 if (bp->vnic_info[i].rss_hash_key) {
3549                         if (i == 0)
3550                                 prandom_bytes(vnic->rss_hash_key,
3551                                               HW_HASH_KEY_SIZE);
3552                         else
3553                                 memcpy(vnic->rss_hash_key,
3554                                        bp->vnic_info[0].rss_hash_key,
3555                                        HW_HASH_KEY_SIZE);
3556                 }
3557         }
3558 }
3559
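     /* Return the number of ring pages, rounded up to a power of two so
      * that (pages * desc_per_pg) - 1 can be used as a ring mask.
      */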
3560 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3561 {
3562         int pages;
3563
3564         pages = ring_size / desc_per_pg;
3565
3566         if (!pages)
3567                 return 1;
3568
3569         pages++;
3570
3571         while (pages & (pages - 1))
3572                 pages++;
3573
3574         return pages;
3575 }
3576
3577 void bnxt_set_tpa_flags(struct bnxt *bp)
3578 {
3579         bp->flags &= ~BNXT_FLAG_TPA;
3580         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3581                 return;
3582         if (bp->dev->features & NETIF_F_LRO)
3583                 bp->flags |= BNXT_FLAG_LRO;
3584         else if (bp->dev->features & NETIF_F_GRO_HW)
3585                 bp->flags |= BNXT_FLAG_GRO;
3586 }
3587
3588 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3589  * be set on entry.
3590  */
3591 void bnxt_set_ring_params(struct bnxt *bp)
3592 {
3593         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3594         u32 agg_factor = 0, agg_ring_size = 0;
3595
3596         /* 8 for CRC and VLAN */
3597         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3598
3599         rx_space = rx_size + NET_SKB_PAD +
3600                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3601
3602         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3603         ring_size = bp->rx_ring_size;
3604         bp->rx_agg_ring_size = 0;
3605         bp->rx_agg_nr_pages = 0;
3606
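             /* With TPA, each RX buffer may need extra aggregation buffers:
              * enough for 64K of aggregated payload, capped at 4 per buffer.
              */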
3607         if (bp->flags & BNXT_FLAG_TPA)
3608                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3609
3610         bp->flags &= ~BNXT_FLAG_JUMBO;
3611         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3612                 u32 jumbo_factor;
3613
3614                 bp->flags |= BNXT_FLAG_JUMBO;
3615                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3616                 if (jumbo_factor > agg_factor)
3617                         agg_factor = jumbo_factor;
3618         }
3619         agg_ring_size = ring_size * agg_factor;
3620
3621         if (agg_ring_size) {
3622                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3623                                                         RX_DESC_CNT);
3624                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3625                         u32 tmp = agg_ring_size;
3626
3627                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3628                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3629                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3630                                     tmp, agg_ring_size);
3631                 }
3632                 bp->rx_agg_ring_size = agg_ring_size;
3633                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3634                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3635                 rx_space = rx_size + NET_SKB_PAD +
3636                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3637         }
3638
3639         bp->rx_buf_use_size = rx_size;
3640         bp->rx_buf_size = rx_space;
3641
3642         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3643         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3644
3645         ring_size = bp->tx_ring_size;
3646         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3647         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3648
3649         max_rx_cmpl = bp->rx_ring_size;
3650         /* MAX TPA needs to be added because TPA_START completions are
3651          * immediately recycled, so the TPA completions are not bound by
3652          * the RX ring size.
3653          */
3654         if (bp->flags & BNXT_FLAG_TPA)
3655                 max_rx_cmpl += bp->max_tpa;
3656         /* RX and TPA completions are 32-byte, all others are 16-byte */
3657         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3658         bp->cp_ring_size = ring_size;
3659
3660         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3661         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3662                 bp->cp_nr_pages = MAX_CP_PAGES;
3663                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3664                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3665                             ring_size, bp->cp_ring_size);
3666         }
3667         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3668         bp->cp_ring_mask = bp->cp_bit - 1;
3669 }
3670
3671 /* Changing allocation mode of RX rings.
3672  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3673  */
3674 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3675 {
3676         if (page_mode) {
3677                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3678                         return -EOPNOTSUPP;
3679                 bp->dev->max_mtu =
3680                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3681                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3682                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3683                 bp->rx_dir = DMA_BIDIRECTIONAL;
3684                 bp->rx_skb_func = bnxt_rx_page_skb;
3685                 /* Disable LRO or GRO_HW */
3686                 netdev_update_features(bp->dev);
3687         } else {
3688                 bp->dev->max_mtu = bp->max_mtu;
3689                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3690                 bp->rx_dir = DMA_FROM_DEVICE;
3691                 bp->rx_skb_func = bnxt_rx_skb;
3692         }
3693         return 0;
3694 }
3695
3696 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3697 {
3698         int i;
3699         struct bnxt_vnic_info *vnic;
3700         struct pci_dev *pdev = bp->pdev;
3701
3702         if (!bp->vnic_info)
3703                 return;
3704
3705         for (i = 0; i < bp->nr_vnics; i++) {
3706                 vnic = &bp->vnic_info[i];
3707
3708                 kfree(vnic->fw_grp_ids);
3709                 vnic->fw_grp_ids = NULL;
3710
3711                 kfree(vnic->uc_list);
3712                 vnic->uc_list = NULL;
3713
3714                 if (vnic->mc_list) {
3715                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3716                                           vnic->mc_list, vnic->mc_list_mapping);
3717                         vnic->mc_list = NULL;
3718                 }
3719
3720                 if (vnic->rss_table) {
3721                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3722                                           vnic->rss_table,
3723                                           vnic->rss_table_dma_addr);
3724                         vnic->rss_table = NULL;
3725                 }
3726
3727                 vnic->rss_hash_key = NULL;
3728                 vnic->flags = 0;
3729         }
3730 }
3731
3732 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3733 {
3734         int i, rc = 0, size;
3735         struct bnxt_vnic_info *vnic;
3736         struct pci_dev *pdev = bp->pdev;
3737         int max_rings;
3738
3739         for (i = 0; i < bp->nr_vnics; i++) {
3740                 vnic = &bp->vnic_info[i];
3741
3742                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3743                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3744
3745                         if (mem_size > 0) {
3746                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3747                                 if (!vnic->uc_list) {
3748                                         rc = -ENOMEM;
3749                                         goto out;
3750                                 }
3751                         }
3752                 }
3753
3754                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3755                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3756                         vnic->mc_list =
3757                                 dma_alloc_coherent(&pdev->dev,
3758                                                    vnic->mc_list_size,
3759                                                    &vnic->mc_list_mapping,
3760                                                    GFP_KERNEL);
3761                         if (!vnic->mc_list) {
3762                                 rc = -ENOMEM;
3763                                 goto out;
3764                         }
3765                 }
3766
3767                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3768                         goto vnic_skip_grps;
3769
3770                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3771                         max_rings = bp->rx_nr_rings;
3772                 else
3773                         max_rings = 1;
3774
3775                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3776                 if (!vnic->fw_grp_ids) {
3777                         rc = -ENOMEM;
3778                         goto out;
3779                 }
3780 vnic_skip_grps:
3781                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3782                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3783                         continue;
3784
3785                 /* Allocate rss table and hash key */
3786                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3787                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3788                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3789
3790                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3791                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3792                                                      vnic->rss_table_size,
3793                                                      &vnic->rss_table_dma_addr,
3794                                                      GFP_KERNEL);
3795                 if (!vnic->rss_table) {
3796                         rc = -ENOMEM;
3797                         goto out;
3798                 }
3799
3800                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3801                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3802         }
3803         return 0;
3804
3805 out:
3806         return rc;
3807 }
3808
3809 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3810 {
3811         struct pci_dev *pdev = bp->pdev;
3812
3813         if (bp->hwrm_cmd_resp_addr) {
3814                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3815                                   bp->hwrm_cmd_resp_dma_addr);
3816                 bp->hwrm_cmd_resp_addr = NULL;
3817         }
3818
3819         if (bp->hwrm_cmd_kong_resp_addr) {
3820                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3821                                   bp->hwrm_cmd_kong_resp_addr,
3822                                   bp->hwrm_cmd_kong_resp_dma_addr);
3823                 bp->hwrm_cmd_kong_resp_addr = NULL;
3824         }
3825 }
3826
3827 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3828 {
3829         struct pci_dev *pdev = bp->pdev;
3830
3831         if (bp->hwrm_cmd_kong_resp_addr)
3832                 return 0;
3833
3834         bp->hwrm_cmd_kong_resp_addr =
3835                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3836                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3837                                    GFP_KERNEL);
3838         if (!bp->hwrm_cmd_kong_resp_addr)
3839                 return -ENOMEM;
3840
3841         return 0;
3842 }
3843
3844 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3845 {
3846         struct pci_dev *pdev = bp->pdev;
3847
3848         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3849                                                    &bp->hwrm_cmd_resp_dma_addr,
3850                                                    GFP_KERNEL);
3851         if (!bp->hwrm_cmd_resp_addr)
3852                 return -ENOMEM;
3853
3854         return 0;
3855 }
3856
3857 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3858 {
3859         if (bp->hwrm_short_cmd_req_addr) {
3860                 struct pci_dev *pdev = bp->pdev;
3861
3862                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3863                                   bp->hwrm_short_cmd_req_addr,
3864                                   bp->hwrm_short_cmd_req_dma_addr);
3865                 bp->hwrm_short_cmd_req_addr = NULL;
3866         }
3867 }
3868
3869 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3870 {
3871         struct pci_dev *pdev = bp->pdev;
3872
3873         if (bp->hwrm_short_cmd_req_addr)
3874                 return 0;
3875
3876         bp->hwrm_short_cmd_req_addr =
3877                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3878                                    &bp->hwrm_short_cmd_req_dma_addr,
3879                                    GFP_KERNEL);
3880         if (!bp->hwrm_short_cmd_req_addr)
3881                 return -ENOMEM;
3882
3883         return 0;
3884 }
3885
3886 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3887 {
3888         kfree(stats->hw_masks);
3889         stats->hw_masks = NULL;
3890         kfree(stats->sw_stats);
3891         stats->sw_stats = NULL;
3892         if (stats->hw_stats) {
3893                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3894                                   stats->hw_stats_map);
3895                 stats->hw_stats = NULL;
3896         }
3897 }
3898
3899 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3900                                 bool alloc_masks)
3901 {
3902         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3903                                              &stats->hw_stats_map, GFP_KERNEL);
3904         if (!stats->hw_stats)
3905                 return -ENOMEM;
3906
3907         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3908         if (!stats->sw_stats)
3909                 goto stats_mem_err;
3910
3911         if (alloc_masks) {
3912                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3913                 if (!stats->hw_masks)
3914                         goto stats_mem_err;
3915         }
3916         return 0;
3917
3918 stats_mem_err:
3919         bnxt_free_stats_mem(bp, stats);
3920         return -ENOMEM;
3921 }
3922
3923 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3924 {
3925         int i;
3926
3927         for (i = 0; i < count; i++)
3928                 mask_arr[i] = mask;
3929 }
3930
3931 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3932 {
3933         int i;
3934
3935         for (i = 0; i < count; i++)
3936                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3937 }
3938
3939 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3940                                     struct bnxt_stats_mem *stats)
3941 {
3942         struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3943         struct hwrm_func_qstats_ext_input req = {0};
3944         __le64 *hw_masks;
3945         int rc;
3946
3947         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3948             !(bp->flags & BNXT_FLAG_CHIP_P5))
3949                 return -EOPNOTSUPP;
3950
3951         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3952         req.fid = cpu_to_le16(0xffff);
3953         req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3954         mutex_lock(&bp->hwrm_cmd_lock);
3955         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3956         if (rc)
3957                 goto qstat_exit;
3958
3959         hw_masks = &resp->rx_ucast_pkts;
3960         bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3961
3962 qstat_exit:
3963         mutex_unlock(&bp->hwrm_cmd_lock);
3964         return rc;
3965 }
3966
3967 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3968 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3969
3970 static void bnxt_init_stats(struct bnxt *bp)
3971 {
3972         struct bnxt_napi *bnapi = bp->bnapi[0];
3973         struct bnxt_cp_ring_info *cpr;
3974         struct bnxt_stats_mem *stats;
3975         __le64 *rx_stats, *tx_stats;
3976         int rc, rx_count, tx_count;
3977         u64 *rx_masks, *tx_masks;
3978         u64 mask;
3979         u8 flags;
3980
3981         cpr = &bnapi->cp_ring;
3982         stats = &cpr->stats;
3983         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
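             /* If the firmware cannot report counter widths, assume 48-bit
              * ring counters on P5 chips and full 64-bit counters otherwise.
              */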
3984         if (rc) {
3985                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3986                         mask = (1ULL << 48) - 1;
3987                 else
3988                         mask = -1ULL;
3989                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3990         }
3991         if (bp->flags & BNXT_FLAG_PORT_STATS) {
3992                 stats = &bp->port_stats;
3993                 rx_stats = stats->hw_stats;
3994                 rx_masks = stats->hw_masks;
3995                 rx_count = sizeof(struct rx_port_stats) / 8;
3996                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3997                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3998                 tx_count = sizeof(struct tx_port_stats) / 8;
3999
4000                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4001                 rc = bnxt_hwrm_port_qstats(bp, flags);
4002                 if (rc) {
4003                         mask = (1ULL << 40) - 1;
4004
4005                         bnxt_fill_masks(rx_masks, mask, rx_count);
4006                         bnxt_fill_masks(tx_masks, mask, tx_count);
4007                 } else {
4008                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4009                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4010                         bnxt_hwrm_port_qstats(bp, 0);
4011                 }
4012         }
4013         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4014                 stats = &bp->rx_port_stats_ext;
4015                 rx_stats = stats->hw_stats;
4016                 rx_masks = stats->hw_masks;
4017                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4018                 stats = &bp->tx_port_stats_ext;
4019                 tx_stats = stats->hw_stats;
4020                 tx_masks = stats->hw_masks;
4021                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4022
4023                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4024                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4025                 if (rc) {
4026                         mask = (1ULL << 40) - 1;
4027
4028                         bnxt_fill_masks(rx_masks, mask, rx_count);
4029                         if (tx_stats)
4030                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4031                 } else {
4032                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4033                         if (tx_stats)
4034                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4035                                                    tx_count);
4036                         bnxt_hwrm_port_qstats_ext(bp, 0);
4037                 }
4038         }
4039 }
4040
4041 static void bnxt_free_port_stats(struct bnxt *bp)
4042 {
4043         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4044         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4045
4046         bnxt_free_stats_mem(bp, &bp->port_stats);
4047         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4048         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4049 }
4050
4051 static void bnxt_free_ring_stats(struct bnxt *bp)
4052 {
4053         int i;
4054
4055         if (!bp->bnapi)
4056                 return;
4057
4058         for (i = 0; i < bp->cp_nr_rings; i++) {
4059                 struct bnxt_napi *bnapi = bp->bnapi[i];
4060                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4061
4062                 bnxt_free_stats_mem(bp, &cpr->stats);
4063         }
4064 }
4065
4066 static int bnxt_alloc_stats(struct bnxt *bp)
4067 {
4068         u32 size, i;
4069         int rc;
4070
4071         size = bp->hw_ring_stats_size;
4072
4073         for (i = 0; i < bp->cp_nr_rings; i++) {
4074                 struct bnxt_napi *bnapi = bp->bnapi[i];
4075                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4076
4077                 cpr->stats.len = size;
4078                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4079                 if (rc)
4080                         return rc;
4081
4082                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4083         }
4084
4085         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4086                 return 0;
4087
4088         if (bp->port_stats.hw_stats)
4089                 goto alloc_ext_stats;
4090
4091         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4092         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4093         if (rc)
4094                 return rc;
4095
4096         bp->flags |= BNXT_FLAG_PORT_STATS;
4097
4098 alloc_ext_stats:
4099         /* Display extended statistics only if FW supports them */
4100         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4101                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4102                         return 0;
4103
4104         if (bp->rx_port_stats_ext.hw_stats)
4105                 goto alloc_tx_ext_stats;
4106
4107         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4108         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4109         /* Extended stats are optional */
4110         if (rc)
4111                 return 0;
4112
4113 alloc_tx_ext_stats:
4114         if (bp->tx_port_stats_ext.hw_stats)
4115                 return 0;
4116
4117         if (bp->hwrm_spec_code >= 0x10902 ||
4118             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4119                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4120                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4121                 /* Extended stats are optional */
4122                 if (rc)
4123                         return 0;
4124         }
4125         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4126         return 0;
4127 }
4128
4129 static void bnxt_clear_ring_indices(struct bnxt *bp)
4130 {
4131         int i;
4132
4133         if (!bp->bnapi)
4134                 return;
4135
4136         for (i = 0; i < bp->cp_nr_rings; i++) {
4137                 struct bnxt_napi *bnapi = bp->bnapi[i];
4138                 struct bnxt_cp_ring_info *cpr;
4139                 struct bnxt_rx_ring_info *rxr;
4140                 struct bnxt_tx_ring_info *txr;
4141
4142                 if (!bnapi)
4143                         continue;
4144
4145                 cpr = &bnapi->cp_ring;
4146                 cpr->cp_raw_cons = 0;
4147
4148                 txr = bnapi->tx_ring;
4149                 if (txr) {
4150                         txr->tx_prod = 0;
4151                         txr->tx_cons = 0;
4152                 }
4153
4154                 rxr = bnapi->rx_ring;
4155                 if (rxr) {
4156                         rxr->rx_prod = 0;
4157                         rxr->rx_agg_prod = 0;
4158                         rxr->rx_sw_agg_prod = 0;
4159                         rxr->rx_next_cons = 0;
4160                 }
4161         }
4162 }
4163
4164 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4165 {
4166 #ifdef CONFIG_RFS_ACCEL
4167         int i;
4168
4169         /* We are under rtnl_lock and all our NAPIs have been disabled.
4170          * It is now safe to delete the hash table.
4171          */
4172         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4173                 struct hlist_head *head;
4174                 struct hlist_node *tmp;
4175                 struct bnxt_ntuple_filter *fltr;
4176
4177                 head = &bp->ntp_fltr_hash_tbl[i];
4178                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4179                         hlist_del(&fltr->hash);
4180                         kfree(fltr);
4181                 }
4182         }
4183         if (irq_reinit) {
4184                 kfree(bp->ntp_fltr_bmap);
4185                 bp->ntp_fltr_bmap = NULL;
4186         }
4187         bp->ntp_fltr_count = 0;
4188 #endif
4189 }
4190
4191 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4192 {
4193 #ifdef CONFIG_RFS_ACCEL
4194         int i, rc = 0;
4195
4196         if (!(bp->flags & BNXT_FLAG_RFS))
4197                 return 0;
4198
4199         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4200                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4201
4202         bp->ntp_fltr_count = 0;
4203         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4204                                     sizeof(long),
4205                                     GFP_KERNEL);
4206
4207         if (!bp->ntp_fltr_bmap)
4208                 rc = -ENOMEM;
4209
4210         return rc;
4211 #else
4212         return 0;
4213 #endif
4214 }
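
/* Sizing sketch for the filter bitmap allocated above, assuming the
 * upstream value BNXT_NTP_FLTR_MAX_FLTR == 4096 and 64-bit longs:
 *
 *	BITS_TO_LONGS(4096) == 64
 *	kcalloc(64, sizeof(long), GFP_KERNEL) -> 512 bytes, zeroed
 *
 * i.e. one bit per possible ntuple filter id, from which free ids are
 * later carved out when aRFS installs a filter.
 */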
4215
4216 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4217 {
4218         bnxt_free_vnic_attributes(bp);
4219         bnxt_free_tx_rings(bp);
4220         bnxt_free_rx_rings(bp);
4221         bnxt_free_cp_rings(bp);
4222         bnxt_free_ntp_fltrs(bp, irq_re_init);
4223         if (irq_re_init) {
4224                 bnxt_free_ring_stats(bp);
4225                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4226                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4227                         bnxt_free_port_stats(bp);
4228                 bnxt_free_ring_grps(bp);
4229                 bnxt_free_vnics(bp);
4230                 kfree(bp->tx_ring_map);
4231                 bp->tx_ring_map = NULL;
4232                 kfree(bp->tx_ring);
4233                 bp->tx_ring = NULL;
4234                 kfree(bp->rx_ring);
4235                 bp->rx_ring = NULL;
4236                 kfree(bp->bnapi);
4237                 bp->bnapi = NULL;
4238         } else {
4239                 bnxt_clear_ring_indices(bp);
4240         }
4241 }
4242
4243 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4244 {
4245         int i, j, rc, size, arr_size;
4246         void *bnapi;
4247
4248         if (irq_re_init) {
4249                 /* Allocate the bnapi pointer array and the memory block
4250                  * for all queues in a single allocation.
4251                  */
4252                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4253                                 bp->cp_nr_rings);
4254                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4255                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4256                 if (!bnapi)
4257                         return -ENOMEM;
4258
4259                 bp->bnapi = bnapi;
4260                 bnapi += arr_size;
4261                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4262                         bp->bnapi[i] = bnapi;
4263                         bp->bnapi[i]->index = i;
4264                         bp->bnapi[i]->bp = bp;
4265                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4266                                 struct bnxt_cp_ring_info *cpr =
4267                                         &bp->bnapi[i]->cp_ring;
4268
4269                                 cpr->cp_ring_struct.ring_mem.flags =
4270                                         BNXT_RMEM_RING_PTE_FLAG;
4271                         }
4272                 }
4273
4274                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4275                                       sizeof(struct bnxt_rx_ring_info),
4276                                       GFP_KERNEL);
4277                 if (!bp->rx_ring)
4278                         return -ENOMEM;
4279
4280                 for (i = 0; i < bp->rx_nr_rings; i++) {
4281                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4282
4283                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4284                                 rxr->rx_ring_struct.ring_mem.flags =
4285                                         BNXT_RMEM_RING_PTE_FLAG;
4286                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4287                                         BNXT_RMEM_RING_PTE_FLAG;
4288                         }
4289                         rxr->bnapi = bp->bnapi[i];
4290                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4291                 }
4292
4293                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4294                                       sizeof(struct bnxt_tx_ring_info),
4295                                       GFP_KERNEL);
4296                 if (!bp->tx_ring)
4297                         return -ENOMEM;
4298
4299                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4300                                           GFP_KERNEL);
4301
4302                 if (!bp->tx_ring_map)
4303                         return -ENOMEM;
4304
4305                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4306                         j = 0;
4307                 else
4308                         j = bp->rx_nr_rings;
4309
4310                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4311                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4312
4313                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4314                                 txr->tx_ring_struct.ring_mem.flags =
4315                                         BNXT_RMEM_RING_PTE_FLAG;
4316                         txr->bnapi = bp->bnapi[j];
4317                         bp->bnapi[j]->tx_ring = txr;
4318                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4319                         if (i >= bp->tx_nr_rings_xdp) {
4320                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4321                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4322                         } else {
4323                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4324                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4325                         }
4326                 }
4327
4328                 rc = bnxt_alloc_stats(bp);
4329                 if (rc)
4330                         goto alloc_mem_err;
4331                 bnxt_init_stats(bp);
4332
4333                 rc = bnxt_alloc_ntp_fltrs(bp);
4334                 if (rc)
4335                         goto alloc_mem_err;
4336
4337                 rc = bnxt_alloc_vnics(bp);
4338                 if (rc)
4339                         goto alloc_mem_err;
4340         }
4341
4342         bnxt_init_ring_struct(bp);
4343
4344         rc = bnxt_alloc_rx_rings(bp);
4345         if (rc)
4346                 goto alloc_mem_err;
4347
4348         rc = bnxt_alloc_tx_rings(bp);
4349         if (rc)
4350                 goto alloc_mem_err;
4351
4352         rc = bnxt_alloc_cp_rings(bp);
4353         if (rc)
4354                 goto alloc_mem_err;
4355
4356         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4357                                   BNXT_VNIC_UCAST_FLAG;
4358         rc = bnxt_alloc_vnic_attributes(bp);
4359         if (rc)
4360                 goto alloc_mem_err;
4361         return 0;
4362
4363 alloc_mem_err:
4364         bnxt_free_mem(bp, true);
4365         return rc;
4366 }
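
/* Memory layout sketch for the combined bnapi allocation in
 * bnxt_alloc_mem() above (offsets assume a 64-bit build with 64-byte
 * cache lines and cp_nr_rings == 8; both are just example values):
 *
 *	base                   : struct bnxt_napi *bnapi[8]  (arr_size == 64)
 *	base + 64              : struct bnxt_napi, ring 0
 *	base + 64 + 1 * size   : struct bnxt_napi, ring 1
 *	...
 *
 * Every bp->bnapi[i] points into the same block, so the single
 * kfree(bp->bnapi) in bnxt_free_mem() releases the array and all of the
 * per-ring structures together.
 */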
4367
4368 static void bnxt_disable_int(struct bnxt *bp)
4369 {
4370         int i;
4371
4372         if (!bp->bnapi)
4373                 return;
4374
4375         for (i = 0; i < bp->cp_nr_rings; i++) {
4376                 struct bnxt_napi *bnapi = bp->bnapi[i];
4377                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4378                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4379
4380                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4381                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4382         }
4383 }
4384
4385 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4386 {
4387         struct bnxt_napi *bnapi = bp->bnapi[n];
4388         struct bnxt_cp_ring_info *cpr;
4389
4390         cpr = &bnapi->cp_ring;
4391         return cpr->cp_ring_struct.map_idx;
4392 }
4393
4394 static void bnxt_disable_int_sync(struct bnxt *bp)
4395 {
4396         int i;
4397
4398         if (!bp->irq_tbl)
4399                 return;
4400
4401         atomic_inc(&bp->intr_sem);
4402
4403         bnxt_disable_int(bp);
4404         for (i = 0; i < bp->cp_nr_rings; i++) {
4405                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4406
4407                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4408         }
4409 }
4410
4411 static void bnxt_enable_int(struct bnxt *bp)
4412 {
4413         int i;
4414
4415         atomic_set(&bp->intr_sem, 0);
4416         for (i = 0; i < bp->cp_nr_rings; i++) {
4417                 struct bnxt_napi *bnapi = bp->bnapi[i];
4418                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4419
4420                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4421         }
4422 }
4423
4424 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4425                             u16 cmpl_ring, u16 target_id)
4426 {
4427         struct input *req = request;
4428
4429         req->req_type = cpu_to_le16(req_type);
4430         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4431         req->target_id = cpu_to_le16(target_id);
4432         if (bnxt_kong_hwrm_message(bp, req))
4433                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4434         else
4435                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4436 }
4437
4438 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4439 {
4440         switch (hwrm_err) {
4441         case HWRM_ERR_CODE_SUCCESS:
4442                 return 0;
4443         case HWRM_ERR_CODE_RESOURCE_LOCKED:
4444                 return -EROFS;
4445         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4446                 return -EACCES;
4447         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4448                 return -ENOSPC;
4449         case HWRM_ERR_CODE_INVALID_PARAMS:
4450         case HWRM_ERR_CODE_INVALID_FLAGS:
4451         case HWRM_ERR_CODE_INVALID_ENABLES:
4452         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4453         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4454                 return -EINVAL;
4455         case HWRM_ERR_CODE_NO_BUFFER:
4456                 return -ENOMEM;
4457         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4458         case HWRM_ERR_CODE_BUSY:
4459                 return -EAGAIN;
4460         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4461                 return -EOPNOTSUPP;
4462         default:
4463                 return -EIO;
4464         }
4465 }
4466
4467 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4468                                  int timeout, bool silent)
4469 {
4470         int i, intr_process, rc, tmo_count;
4471         struct input *req = msg;
4472         u32 *data = msg;
4473         u8 *valid;
4474         u16 cp_ring_id, len = 0;
4475         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4476         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4477         struct hwrm_short_input short_input = {0};
4478         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4479         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4480         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4481
4482         if (BNXT_NO_FW_ACCESS(bp) &&
4483             le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4484                 return -EBUSY;
4485
4486         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4487                 if (msg_len > bp->hwrm_max_ext_req_len ||
4488                     !bp->hwrm_short_cmd_req_addr)
4489                         return -EINVAL;
4490         }
4491
4492         if (bnxt_hwrm_kong_chnl(bp, req)) {
4493                 dst = BNXT_HWRM_CHNL_KONG;
4494                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4495                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4496                 resp = bp->hwrm_cmd_kong_resp_addr;
4497         }
4498
4499         memset(resp, 0, PAGE_SIZE);
4500         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4501         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4502
4503         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4504         /* currently supports only one outstanding message */
4505         if (intr_process)
4506                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4507
4508         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4509             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4510                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4511                 u16 max_msg_len;
4512
4513                 /* Set the boundary for the maximum extended request length
4514                  * for the short cmd format.  If the device passed one up,
4515                  * use the max internally supported request length.
4516                  */
4517                 max_msg_len = bp->hwrm_max_ext_req_len;
4518
4519                 memcpy(short_cmd_req, req, msg_len);
4520                 if (msg_len < max_msg_len)
4521                         memset(short_cmd_req + msg_len, 0,
4522                                max_msg_len - msg_len);
4523
4524                 short_input.req_type = req->req_type;
4525                 short_input.signature =
4526                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4527                 short_input.size = cpu_to_le16(msg_len);
4528                 short_input.req_addr =
4529                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4530
4531                 data = (u32 *)&short_input;
4532                 msg_len = sizeof(short_input);
4533
4534                 /* Sync memory write before updating doorbell */
4535                 wmb();
4536
4537                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4538         }
4539
4540         /* Write request msg to hwrm channel */
4541         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4542
4543         for (i = msg_len; i < max_req_len; i += 4)
4544                 writel(0, bp->bar0 + bar_offset + i);
4545
4546         /* Ring channel doorbell */
4547         writel(1, bp->bar0 + doorbell_offset);
4548
4549         if (!pci_is_enabled(bp->pdev))
4550                 return -ENODEV;
4551
4552         if (!timeout)
4553                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4554         /* Cap the timeout at the maximum allowed value */
4555         timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
4556         /* convert timeout to usec */
4557         timeout *= 1000;
4558
4559         i = 0;
4560         /* Short timeout for the first few iterations:
4561          * number of loops = number of loops for short timeout +
4562          * number of loops for standard timeout.
4563          */
4564         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4565         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4566         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4567
4568         if (intr_process) {
4569                 u16 seq_id = bp->hwrm_intr_seq_id;
4570
4571                 /* Wait until hwrm response cmpl interrupt is processed */
4572                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4573                        i++ < tmo_count) {
4574                         /* Abort the wait for completion if the FW health
4575                          * check has failed.
4576                          */
4577                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4578                                 return -EBUSY;
4579                         /* on first few passes, just barely sleep */
4580                         if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4581                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4582                                              HWRM_SHORT_MAX_TIMEOUT);
4583                         } else {
4584                                 if (HWRM_WAIT_MUST_ABORT(bp, req))
4585                                         break;
4586                                 usleep_range(HWRM_MIN_TIMEOUT,
4587                                              HWRM_MAX_TIMEOUT);
4588                         }
4589                 }
4590
4591                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4592                         if (!silent)
4593                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4594                                            le16_to_cpu(req->req_type));
4595                         return -EBUSY;
4596                 }
4597                 len = le16_to_cpu(resp->resp_len);
4598                 valid = ((u8 *)resp) + len - 1;
4599         } else {
4600                 int j;
4601
4602                 /* Check if response len is updated */
4603                 for (i = 0; i < tmo_count; i++) {
4604                         /* Abort the wait for completion if the FW health
4605                          * check has failed.
4606                          */
4607                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4608                                 return -EBUSY;
4609                         len = le16_to_cpu(resp->resp_len);
4610                         if (len)
4611                                 break;
4612                         /* on first few passes, just barely sleep */
4613                         if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4614                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4615                                              HWRM_SHORT_MAX_TIMEOUT);
4616                         } else {
4617                                 if (HWRM_WAIT_MUST_ABORT(bp, req))
4618                                         goto timeout_abort;
4619                                 usleep_range(HWRM_MIN_TIMEOUT,
4620                                              HWRM_MAX_TIMEOUT);
4621                         }
4622                 }
4623
4624                 if (i >= tmo_count) {
4625 timeout_abort:
4626                         if (!silent)
4627                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4628                                            HWRM_TOTAL_TIMEOUT(i),
4629                                            le16_to_cpu(req->req_type),
4630                                            le16_to_cpu(req->seq_id), len);
4631                         return -EBUSY;
4632                 }
4633
4634                 /* The last byte of the response contains the valid bit */
4635                 valid = ((u8 *)resp) + len - 1;
4636                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4637                         /* make sure we read from updated DMA memory */
4638                         dma_rmb();
4639                         if (*valid)
4640                                 break;
4641                         usleep_range(1, 5);
4642                 }
4643
4644                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4645                         if (!silent)
4646                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4647                                            HWRM_TOTAL_TIMEOUT(i),
4648                                            le16_to_cpu(req->req_type),
4649                                            le16_to_cpu(req->seq_id), len,
4650                                            *valid);
4651                         return -EBUSY;
4652                 }
4653         }
4654
4655         /* Zero the valid bit for compatibility.  The valid bit in an older
4656          * spec may become a new field in a newer spec.  We must make sure
4657          * that a new field not implemented by the old spec reads as zero.
4658          */
4659         *valid = 0;
4660         rc = le16_to_cpu(resp->error_code);
4661         if (rc && !silent)
4662                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4663                            le16_to_cpu(resp->req_type),
4664                            le16_to_cpu(resp->seq_id), rc);
4665         return bnxt_hwrm_to_stderr(rc);
4666 }
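
/* Worked example of the polling budget computed in
 * bnxt_hwrm_do_send_msg() above, using the upstream defaults from
 * bnxt.h (assumed here: DFLT_HWRM_CMD_TIMEOUT == 500 ms,
 * HWRM_SHORT_TIMEOUT_COUNTER == 5, HWRM_SHORT_MIN_TIMEOUT == 3 us,
 * HWRM_MIN_TIMEOUT == 25 us):
 *
 *	timeout   = 500 * 1000 - 5 * 3            = 499985 us
 *	tmo_count = 5 + DIV_ROUND_UP(499985, 25)  = 5 + 20000 = 20005
 *
 * i.e. five quick 3-10 us polls followed by up to 20000 slower 25-40 us
 * polls before the request is declared timed out.
 */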
4667
4668 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4669 {
4670         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4671 }
4672
4673 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4674                               int timeout)
4675 {
4676         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4677 }
4678
4679 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4680 {
4681         int rc;
4682
4683         mutex_lock(&bp->hwrm_cmd_lock);
4684         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4685         mutex_unlock(&bp->hwrm_cmd_lock);
4686         return rc;
4687 }
4688
4689 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4690                              int timeout)
4691 {
4692         int rc;
4693
4694         mutex_lock(&bp->hwrm_cmd_lock);
4695         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4696         mutex_unlock(&bp->hwrm_cmd_lock);
4697         return rc;
4698 }
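
/* Typical calling pattern for the wrappers above (sketch only;
 * HWRM_FUNC_QCFG is merely an example request type):
 *
 *	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 *	struct hwrm_func_qcfg_input req = {0};
 *	int rc;
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		... copy fields out of *resp while holding the lock ...
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 *
 * hwrm_send_message() is the shorthand when nothing needs to be read
 * back from the response buffer.
 */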
4699
4700 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4701                             bool async_only)
4702 {
4703         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4704         struct hwrm_func_drv_rgtr_input req = {0};
4705         DECLARE_BITMAP(async_events_bmap, 256);
4706         u32 *events = (u32 *)async_events_bmap;
4707         u32 flags;
4708         int rc, i;
4709
4710         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4711
4712         req.enables =
4713                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4714                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4715                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4716
4717         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4718         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4719         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4720                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4721         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4722                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4723                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4724         req.flags = cpu_to_le32(flags);
4725         req.ver_maj_8b = DRV_VER_MAJ;
4726         req.ver_min_8b = DRV_VER_MIN;
4727         req.ver_upd_8b = DRV_VER_UPD;
4728         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4729         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4730         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4731
4732         if (BNXT_PF(bp)) {
4733                 u32 data[8];
4734                 int i;
4735
4736                 memset(data, 0, sizeof(data));
4737                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4738                         u16 cmd = bnxt_vf_req_snif[i];
4739                         unsigned int bit, idx;
4740
4741                         idx = cmd / 32;
4742                         bit = cmd % 32;
4743                         data[idx] |= 1 << bit;
4744                 }
4745
4746                 for (i = 0; i < 8; i++)
4747                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4748
4749                 req.enables |=
4750                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4751         }
4752
4753         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4754                 req.flags |= cpu_to_le32(
4755                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4756
4757         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4758         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4759                 u16 event_id = bnxt_async_events_arr[i];
4760
4761                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4762                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4763                         continue;
4764                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4765         }
4766         if (bmap && bmap_size) {
4767                 for (i = 0; i < bmap_size; i++) {
4768                         if (test_bit(i, bmap))
4769                                 __set_bit(i, async_events_bmap);
4770                 }
4771         }
4772         for (i = 0; i < 8; i++)
4773                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4774
4775         if (async_only)
4776                 req.enables =
4777                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4778
4779         mutex_lock(&bp->hwrm_cmd_lock);
4780         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4781         if (!rc) {
4782                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4783                 if (resp->flags &
4784                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4785                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4786         }
4787         mutex_unlock(&bp->hwrm_cmd_lock);
4788         return rc;
4789 }
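
/* Bit arithmetic used for req.vf_req_fwd above, worked for one
 * hypothetical command id (0x5e is only an example value):
 *
 *	idx = 0x5e / 32 = 2;    bit = 0x5e % 32 = 30;
 *	data[2] |= 1 << 30;
 *
 * so each VF request type to be forwarded to the PF sets exactly one
 * bit in the 8 x 32-bit forwarding mask registered with the firmware.
 */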
4790
4791 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4792 {
4793         struct hwrm_func_drv_unrgtr_input req = {0};
4794
4795         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4796                 return 0;
4797
4798         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4799         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4800 }
4801
4802 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4803 {
4804         int rc;
4805         struct hwrm_tunnel_dst_port_free_input req = {0};
4806
4807         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4808         req.tunnel_type = tunnel_type;
4809
4810         switch (tunnel_type) {
4811         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4812                 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4813                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4814                 break;
4815         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4816                 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4817                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4818                 break;
4819         default:
4820                 break;
4821         }
4822
4823         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4824         if (rc)
4825                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4826                            rc);
4827         return rc;
4828 }
4829
4830 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4831                                            u8 tunnel_type)
4832 {
4833         int rc;
4834         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4835         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4836
4837         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4838
4839         req.tunnel_type = tunnel_type;
4840         req.tunnel_dst_port_val = port;
4841
4842         mutex_lock(&bp->hwrm_cmd_lock);
4843         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4844         if (rc) {
4845                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4846                            rc);
4847                 goto err_out;
4848         }
4849
4850         switch (tunnel_type) {
4851         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4852                 bp->vxlan_fw_dst_port_id =
4853                         le16_to_cpu(resp->tunnel_dst_port_id);
4854                 break;
4855         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4856                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4857                 break;
4858         default:
4859                 break;
4860         }
4861
4862 err_out:
4863         mutex_unlock(&bp->hwrm_cmd_lock);
4864         return rc;
4865 }
4866
4867 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4868 {
4869         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4870         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4871
4872         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4873         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4874
4875         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4876         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4877         req.mask = cpu_to_le32(vnic->rx_mask);
4878         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4879 }
4880
4881 #ifdef CONFIG_RFS_ACCEL
4882 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4883                                             struct bnxt_ntuple_filter *fltr)
4884 {
4885         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4886
4887         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4888         req.ntuple_filter_id = fltr->filter_id;
4889         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4890 }
4891
4892 #define BNXT_NTP_FLTR_FLAGS                                     \
4893         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4894          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4895          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4896          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4897          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4898          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4899          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4900          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4901          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4902          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4903          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4904          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4905          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4906          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4907
4908 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4909                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4910
4911 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4912                                              struct bnxt_ntuple_filter *fltr)
4913 {
4914         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4915         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4916         struct flow_keys *keys = &fltr->fkeys;
4917         struct bnxt_vnic_info *vnic;
4918         u32 flags = 0;
4919         int rc = 0;
4920
4921         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4922         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4923
4924         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4925                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4926                 req.dst_id = cpu_to_le16(fltr->rxq);
4927         } else {
4928                 vnic = &bp->vnic_info[fltr->rxq + 1];
4929                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4930         }
4931         req.flags = cpu_to_le32(flags);
4932         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4933
4934         req.ethertype = htons(ETH_P_IP);
4935         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4936         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4937         req.ip_protocol = keys->basic.ip_proto;
4938
4939         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4940                 int i;
4941
4942                 req.ethertype = htons(ETH_P_IPV6);
4943                 req.ip_addr_type =
4944                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4945                 *(struct in6_addr *)&req.src_ipaddr[0] =
4946                         keys->addrs.v6addrs.src;
4947                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4948                         keys->addrs.v6addrs.dst;
4949                 for (i = 0; i < 4; i++) {
4950                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4951                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4952                 }
4953         } else {
4954                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4955                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4956                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4957                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4958         }
4959         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4960                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4961                 req.tunnel_type =
4962                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4963         }
4964
4965         req.src_port = keys->ports.src;
4966         req.src_port_mask = cpu_to_be16(0xffff);
4967         req.dst_port = keys->ports.dst;
4968         req.dst_port_mask = cpu_to_be16(0xffff);
4969
4970         mutex_lock(&bp->hwrm_cmd_lock);
4971         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4972         if (!rc) {
4973                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4974                 fltr->filter_id = resp->ntuple_filter_id;
4975         }
4976         mutex_unlock(&bp->hwrm_cmd_lock);
4977         return rc;
4978 }
4979 #endif
4980
4981 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4982                                      u8 *mac_addr)
4983 {
4984         int rc;
4985         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4986         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4987
4988         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4989         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4990         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4991                 req.flags |=
4992                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4993         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4994         req.enables =
4995                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4996                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4997                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4998         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4999         req.l2_addr_mask[0] = 0xff;
5000         req.l2_addr_mask[1] = 0xff;
5001         req.l2_addr_mask[2] = 0xff;
5002         req.l2_addr_mask[3] = 0xff;
5003         req.l2_addr_mask[4] = 0xff;
5004         req.l2_addr_mask[5] = 0xff;
5005
5006         mutex_lock(&bp->hwrm_cmd_lock);
5007         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5008         if (!rc)
5009                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5010                                                         resp->l2_filter_id;
5011         mutex_unlock(&bp->hwrm_cmd_lock);
5012         return rc;
5013 }
5014
5015 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5016 {
5017         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5018         int rc = 0;
5019
5020         /* Any associated ntuple filters will also be cleared by firmware. */
5021         mutex_lock(&bp->hwrm_cmd_lock);
5022         for (i = 0; i < num_of_vnics; i++) {
5023                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5024
5025                 for (j = 0; j < vnic->uc_filter_count; j++) {
5026                         struct hwrm_cfa_l2_filter_free_input req = {0};
5027
5028                         bnxt_hwrm_cmd_hdr_init(bp, &req,
5029                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
5030
5031                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
5032
5033                         rc = _hwrm_send_message(bp, &req, sizeof(req),
5034                                                 HWRM_CMD_TIMEOUT);
5035                 }
5036                 vnic->uc_filter_count = 0;
5037         }
5038         mutex_unlock(&bp->hwrm_cmd_lock);
5039
5040         return rc;
5041 }
5042
5043 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5044 {
5045         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5046         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5047         struct hwrm_vnic_tpa_cfg_input req = {0};
5048
5049         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5050                 return 0;
5051
5052         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5053
5054         if (tpa_flags) {
5055                 u16 mss = bp->dev->mtu - 40;
5056                 u32 nsegs, n, segs = 0, flags;
5057
5058                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5059                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5060                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5061                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5062                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5063                 if (tpa_flags & BNXT_FLAG_GRO)
5064                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5065
5066                 req.flags = cpu_to_le32(flags);
5067
5068                 req.enables =
5069                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5070                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5071                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5072
5073                 /* Number of segs is in log2 units, and the first packet
5074                  * is not included in this count.
5075                  */
5076                 if (mss <= BNXT_RX_PAGE_SIZE) {
5077                         n = BNXT_RX_PAGE_SIZE / mss;
5078                         nsegs = (MAX_SKB_FRAGS - 1) * n;
5079                 } else {
5080                         n = mss / BNXT_RX_PAGE_SIZE;
5081                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
5082                                 n++;
5083                         nsegs = (MAX_SKB_FRAGS - n) / n;
5084                 }
5085
5086                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5087                         segs = MAX_TPA_SEGS_P5;
5088                         max_aggs = bp->max_tpa;
5089                 } else {
5090                         segs = ilog2(nsegs);
5091                 }
5092                 req.max_agg_segs = cpu_to_le16(segs);
5093                 req.max_aggs = cpu_to_le16(max_aggs);
5094
5095                 req.min_agg_len = cpu_to_le32(512);
5096         }
5097         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5098
5099         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5100 }
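
/* Worked example for the TPA segment math above, assuming a 1500 byte
 * MTU (mss == 1460), BNXT_RX_PAGE_SIZE == 4096 and MAX_SKB_FRAGS == 17
 * (all example values):
 *
 *	mss <= BNXT_RX_PAGE_SIZE, so n = 4096 / 1460 = 2
 *	nsegs = (17 - 1) * 2 = 32
 *	segs  = ilog2(32) = 5
 *
 * On pre-P5 chips max_agg_segs == 5 therefore allows up to 2^5 extra
 * segments per aggregation (the first packet is not counted), while P5
 * chips use the fixed MAX_TPA_SEGS_P5 instead.
 */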
5101
5102 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5103 {
5104         struct bnxt_ring_grp_info *grp_info;
5105
5106         grp_info = &bp->grp_info[ring->grp_idx];
5107         return grp_info->cp_fw_ring_id;
5108 }
5109
5110 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5111 {
5112         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5113                 struct bnxt_napi *bnapi = rxr->bnapi;
5114                 struct bnxt_cp_ring_info *cpr;
5115
5116                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5117                 return cpr->cp_ring_struct.fw_ring_id;
5118         } else {
5119                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5120         }
5121 }
5122
5123 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5124 {
5125         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5126                 struct bnxt_napi *bnapi = txr->bnapi;
5127                 struct bnxt_cp_ring_info *cpr;
5128
5129                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5130                 return cpr->cp_ring_struct.fw_ring_id;
5131         } else {
5132                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5133         }
5134 }
5135
5136 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5137 {
5138         int entries;
5139
5140         if (bp->flags & BNXT_FLAG_CHIP_P5)
5141                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5142         else
5143                 entries = HW_HASH_INDEX_SIZE;
5144
5145         bp->rss_indir_tbl_entries = entries;
5146         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5147                                           GFP_KERNEL);
5148         if (!bp->rss_indir_tbl)
5149                 return -ENOMEM;
5150         return 0;
5151 }
5152
5153 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5154 {
5155         u16 max_rings, max_entries, pad, i;
5156
5157         if (!bp->rx_nr_rings)
5158                 return;
5159
5160         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5161                 max_rings = bp->rx_nr_rings - 1;
5162         else
5163                 max_rings = bp->rx_nr_rings;
5164
5165         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5166
5167         for (i = 0; i < max_entries; i++)
5168                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5169
5170         pad = bp->rss_indir_tbl_entries - max_entries;
5171         if (pad)
5172                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5173 }
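
/* ethtool_rxfh_indir_default(i, max_rings) is simply i % max_rings, so
 * with, say, 128 table entries and 4 RX rings the default table built
 * above reads 0 1 2 3 0 1 2 3 ... across all 128 entries, spreading
 * flows round-robin over the rings until the user overrides the table
 * via ethtool -X.
 */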
5174
5175 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5176 {
5177         u16 i, tbl_size, max_ring = 0;
5178
5179         if (!bp->rss_indir_tbl)
5180                 return 0;
5181
5182         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5183         for (i = 0; i < tbl_size; i++)
5184                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5185         return max_ring;
5186 }
5187
5188 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5189 {
5190         if (bp->flags & BNXT_FLAG_CHIP_P5)
5191                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5192         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5193                 return 2;
5194         return 1;
5195 }
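
/* Example of the context count above, assuming the upstream value
 * BNXT_RSS_TABLE_ENTRIES_P5 == 64: on P5 chips, 8 RX rings need
 * DIV_ROUND_UP(8, 64) == 1 RSS context and 80 rings would need 2.
 * Pre-P5 chips always use one context, except Nitro A0 whose second
 * context feeds the cos_rule programmed in bnxt_hwrm_vnic_cfg().
 */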
5196
5197 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5198 {
5199         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5200         u16 i, j;
5201
5202         /* Fill the RSS indirection table with ring group ids */
5203         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5204                 if (!no_rss)
5205                         j = bp->rss_indir_tbl[i];
5206                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5207         }
5208 }
5209
5210 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5211                                       struct bnxt_vnic_info *vnic)
5212 {
5213         __le16 *ring_tbl = vnic->rss_table;
5214         struct bnxt_rx_ring_info *rxr;
5215         u16 tbl_size, i;
5216
5217         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5218
5219         for (i = 0; i < tbl_size; i++) {
5220                 u16 ring_id, j;
5221
5222                 j = bp->rss_indir_tbl[i];
5223                 rxr = &bp->rx_ring[j];
5224
5225                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5226                 *ring_tbl++ = cpu_to_le16(ring_id);
5227                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5228                 *ring_tbl++ = cpu_to_le16(ring_id);
5229         }
5230 }
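
/* On P5 chips each indirection table slot written above is a pair of
 * ids: the RX ring's fw_ring_id followed by its companion completion
 * ring id, e.g. (0x12, 0x13) for one slot (ids are illustrative).
 * That is why ring_tbl advances by two __le16 entries per iteration.
 */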
5231
5232 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5233 {
5234         if (bp->flags & BNXT_FLAG_CHIP_P5)
5235                 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5236         else
5237                 __bnxt_fill_hw_rss_tbl(bp, vnic);
5238 }
5239
5240 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5241 {
5242         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5243         struct hwrm_vnic_rss_cfg_input req = {0};
5244
5245         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5246             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5247                 return 0;
5248
5249         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5250         if (set_rss) {
5251                 bnxt_fill_hw_rss_tbl(bp, vnic);
5252                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5253                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5254                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5255                 req.hash_key_tbl_addr =
5256                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
5257         }
5258         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5259         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5260 }
5261
5262 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5263 {
5264         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5265         struct hwrm_vnic_rss_cfg_input req = {0};
5266         dma_addr_t ring_tbl_map;
5267         u32 i, nr_ctxs;
5268
5269         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5270         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5271         if (!set_rss) {
5272                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5273                 return 0;
5274         }
5275         bnxt_fill_hw_rss_tbl(bp, vnic);
5276         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5277         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5278         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5279         ring_tbl_map = vnic->rss_table_dma_addr;
5280         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5281         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5282                 int rc;
5283
5284                 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5285                 req.ring_table_pair_index = i;
5286                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5287                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5288                 if (rc)
5289                         return rc;
5290         }
5291         return 0;
5292 }
5293
5294 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5295 {
5296         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5297         struct hwrm_vnic_plcmodes_cfg_input req = {0};
5298
5299         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5300         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5301                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5302                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5303         req.enables =
5304                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5305                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5306         /* thresholds not implemented in firmware yet */
5307         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5308         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5309         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5310         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5311 }
5312
5313 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5314                                         u16 ctx_idx)
5315 {
5316         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5317
5318         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5319         req.rss_cos_lb_ctx_id =
5320                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5321
5322         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5323         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5324 }
5325
5326 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5327 {
5328         int i, j;
5329
5330         for (i = 0; i < bp->nr_vnics; i++) {
5331                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5332
5333                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5334                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5335                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5336                 }
5337         }
5338         bp->rsscos_nr_ctxs = 0;
5339 }
5340
5341 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5342 {
5343         int rc;
5344         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5345         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5346                                                 bp->hwrm_cmd_resp_addr;
5347
5348         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5349                                -1);
5350
5351         mutex_lock(&bp->hwrm_cmd_lock);
5352         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5353         if (!rc)
5354                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5355                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5356         mutex_unlock(&bp->hwrm_cmd_lock);
5357
5358         return rc;
5359 }
5360
5361 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5362 {
5363         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5364                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5365         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5366 }
5367
5368 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5369 {
5370         unsigned int ring = 0, grp_idx;
5371         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5372         struct hwrm_vnic_cfg_input req = {0};
5373         u16 def_vlan = 0;
5374
5375         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5376
5377         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5378                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5379
5380                 req.default_rx_ring_id =
5381                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5382                 req.default_cmpl_ring_id =
5383                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5384                 req.enables =
5385                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5386                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5387                 goto vnic_mru;
5388         }
5389         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5390         /* Only RSS is supported for now; COS & LB are TBD */
5391         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5392                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5393                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5394                                            VNIC_CFG_REQ_ENABLES_MRU);
5395         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5396                 req.rss_rule =
5397                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5398                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5399                                            VNIC_CFG_REQ_ENABLES_MRU);
5400                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5401         } else {
5402                 req.rss_rule = cpu_to_le16(0xffff);
5403         }
5404
5405         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5406             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5407                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5408                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5409         } else {
5410                 req.cos_rule = cpu_to_le16(0xffff);
5411         }
5412
5413         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5414                 ring = 0;
5415         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5416                 ring = vnic_id - 1;
5417         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5418                 ring = bp->rx_nr_rings - 1;
5419
5420         grp_idx = bp->rx_ring[ring].bnapi->index;
5421         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5422         req.lb_rule = cpu_to_le16(0xffff);
5423 vnic_mru:
5424         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5425
5426         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5427 #ifdef CONFIG_BNXT_SRIOV
5428         if (BNXT_VF(bp))
5429                 def_vlan = bp->vf.vlan;
5430 #endif
5431         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5432                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5433         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5434                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5435
5436         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5437 }
5438
5439 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5440 {
5441         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5442                 struct hwrm_vnic_free_input req = {0};
5443
5444                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5445                 req.vnic_id =
5446                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5447
5448                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5449                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5450         }
5451 }
5452
5453 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5454 {
5455         u16 i;
5456
5457         for (i = 0; i < bp->nr_vnics; i++)
5458                 bnxt_hwrm_vnic_free_one(bp, i);
5459 }
5460
5461 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5462                                 unsigned int start_rx_ring_idx,
5463                                 unsigned int nr_rings)
5464 {
5465         int rc = 0;
5466         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5467         struct hwrm_vnic_alloc_input req = {0};
5468         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5469         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5470
5471         if (bp->flags & BNXT_FLAG_CHIP_P5)
5472                 goto vnic_no_ring_grps;
5473
5474         /* map ring groups to this vnic */
5475         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5476                 grp_idx = bp->rx_ring[i].bnapi->index;
5477                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5478                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5479                                    j, nr_rings);
5480                         break;
5481                 }
5482                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5483         }
5484
5485 vnic_no_ring_grps:
5486         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5487                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5488         if (vnic_id == 0)
5489                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5490
5491         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5492
5493         mutex_lock(&bp->hwrm_cmd_lock);
5494         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5495         if (!rc)
5496                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5497         mutex_unlock(&bp->hwrm_cmd_lock);
5498         return rc;
5499 }
5500
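/* Query VNIC capabilities and cache the results in bp->flags,
 * bp->fw_cap, bp->max_tpa_v2 and bp->hw_ring_stats_size.  Requires
 * HWRM spec 0x10600 or newer.
 */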
5501 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5502 {
5503         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5504         struct hwrm_vnic_qcaps_input req = {0};
5505         int rc;
5506
5507         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5508         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5509         if (bp->hwrm_spec_code < 0x10600)
5510                 return 0;
5511
5512         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5513         mutex_lock(&bp->hwrm_cmd_lock);
5514         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5515         if (!rc) {
5516                 u32 flags = le32_to_cpu(resp->flags);
5517
5518                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5519                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5520                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5521                 if (flags &
5522                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5523                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5524
5525                 /* Older P5 fw before EXT_HW_STATS support did not set
5526                  * VLAN_STRIP_CAP properly.
5527                  */
5528                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5529                     (BNXT_CHIP_P5_THOR(bp) &&
5530                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5531                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5532                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5533                 if (bp->max_tpa_v2) {
5534                         if (BNXT_CHIP_P5_THOR(bp))
5535                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5536                         else
5537                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5538                 }
5539         }
5540         mutex_unlock(&bp->hwrm_cmd_lock);
5541         return rc;
5542 }
5543
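/* Allocate one firmware ring group per RX ring.  A ring group bundles
 * the completion ring, RX ring, aggregation ring and stats context IDs
 * on pre-P5 chips; P5 chips do not use ring groups.
 */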
5544 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5545 {
5546         u16 i;
5547         int rc = 0;
5548
5549         if (bp->flags & BNXT_FLAG_CHIP_P5)
5550                 return 0;
5551
5552         mutex_lock(&bp->hwrm_cmd_lock);
5553         for (i = 0; i < bp->rx_nr_rings; i++) {
5554                 struct hwrm_ring_grp_alloc_input req = {0};
5555                 struct hwrm_ring_grp_alloc_output *resp =
5556                                         bp->hwrm_cmd_resp_addr;
5557                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5558
5559                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5560
5561                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5562                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5563                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5564                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5565
5566                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5567                                         HWRM_CMD_TIMEOUT);
5568                 if (rc)
5569                         break;
5570
5571                 bp->grp_info[grp_idx].fw_grp_id =
5572                         le32_to_cpu(resp->ring_group_id);
5573         }
5574         mutex_unlock(&bp->hwrm_cmd_lock);
5575         return rc;
5576 }
5577
5578 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5579 {
5580         u16 i;
5581         struct hwrm_ring_grp_free_input req = {0};
5582
5583         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5584                 return;
5585
5586         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5587
5588         mutex_lock(&bp->hwrm_cmd_lock);
5589         for (i = 0; i < bp->cp_nr_rings; i++) {
5590                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5591                         continue;
5592                 req.ring_group_id =
5593                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5594
5595                 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5596                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5597         }
5598         mutex_unlock(&bp->hwrm_cmd_lock);
5599 }
5600
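/* Send one HWRM_RING_ALLOC request.  The request describes the ring
 * memory (a single page or a page table), plus per-type associations
 * such as the completion ring, stats context or NQ.  On success the
 * firmware ring ID is stored in ring->fw_ring_id.
 */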
5601 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5602                                     struct bnxt_ring_struct *ring,
5603                                     u32 ring_type, u32 map_index)
5604 {
5605         int rc = 0, err = 0;
5606         struct hwrm_ring_alloc_input req = {0};
5607         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5608         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5609         struct bnxt_ring_grp_info *grp_info;
5610         u16 ring_id;
5611
5612         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5613
5614         req.enables = 0;
5615         if (rmem->nr_pages > 1) {
5616                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5617                 /* Page size is in log2 units */
5618                 req.page_size = BNXT_PAGE_SHIFT;
5619                 req.page_tbl_depth = 1;
5620         } else {
5621                 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5622         }
5623         req.fbo = 0;
5624         /* Association of ring index with doorbell index and MSIX number */
5625         req.logical_id = cpu_to_le16(map_index);
5626
5627         switch (ring_type) {
5628         case HWRM_RING_ALLOC_TX: {
5629                 struct bnxt_tx_ring_info *txr;
5630
5631                 txr = container_of(ring, struct bnxt_tx_ring_info,
5632                                    tx_ring_struct);
5633                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5634                 /* Association of transmit ring with completion ring */
5635                 grp_info = &bp->grp_info[ring->grp_idx];
5636                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5637                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5638                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5639                 req.queue_id = cpu_to_le16(ring->queue_id);
5640                 break;
5641         }
5642         case HWRM_RING_ALLOC_RX:
5643                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5644                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5645                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5646                         u16 flags = 0;
5647
5648                         /* Association of rx ring with stats context */
5649                         grp_info = &bp->grp_info[ring->grp_idx];
5650                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5651                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5652                         req.enables |= cpu_to_le32(
5653                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5654                         if (NET_IP_ALIGN == 2)
5655                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5656                         req.flags = cpu_to_le16(flags);
5657                 }
5658                 break;
5659         case HWRM_RING_ALLOC_AGG:
5660                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5661                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5662                         /* Association of agg ring with rx ring */
5663                         grp_info = &bp->grp_info[ring->grp_idx];
5664                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5665                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5666                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5667                         req.enables |= cpu_to_le32(
5668                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5669                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5670                 } else {
5671                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5672                 }
5673                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5674                 break;
5675         case HWRM_RING_ALLOC_CMPL:
5676                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5677                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5678                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5679                         /* Association of cp ring with nq */
5680                         grp_info = &bp->grp_info[map_index];
5681                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5682                         req.cq_handle = cpu_to_le64(ring->handle);
5683                         req.enables |= cpu_to_le32(
5684                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5685                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5686                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5687                 }
5688                 break;
5689         case HWRM_RING_ALLOC_NQ:
5690                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5691                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5692                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5693                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5694                 break;
5695         default:
5696                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5697                            ring_type);
5698                 return -EINVAL;
5699         }
5700
5701         mutex_lock(&bp->hwrm_cmd_lock);
5702         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5703         err = le16_to_cpu(resp->error_code);
5704         ring_id = le16_to_cpu(resp->ring_id);
5705         mutex_unlock(&bp->hwrm_cmd_lock);
5706
5707         if (rc || err) {
5708                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5709                            ring_type, rc, err);
5710                 return -EIO;
5711         }
5712         ring->fw_ring_id = ring_id;
5713         return rc;
5714 }
5715
5716 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5717 {
5718         int rc;
5719
5720         if (BNXT_PF(bp)) {
5721                 struct hwrm_func_cfg_input req = {0};
5722
5723                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5724                 req.fid = cpu_to_le16(0xffff);
5725                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5726                 req.async_event_cr = cpu_to_le16(idx);
5727                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5728         } else {
5729                 struct hwrm_func_vf_cfg_input req = {0};
5730
5731                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5732                 req.enables =
5733                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5734                 req.async_event_cr = cpu_to_le16(idx);
5735                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5736         }
5737         return rc;
5738 }
5739
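/* Initialize the doorbell address and key for a newly allocated ring.
 * P5 chips use 64-bit doorbells at a fixed PF or VF offset, keyed by
 * ring type and firmware ring ID (xid); older chips use 32-bit
 * doorbells spaced 0x80 bytes apart, indexed by map_idx.
 */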
5740 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5741                         u32 map_idx, u32 xid)
5742 {
5743         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5744                 if (BNXT_PF(bp))
5745                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5746                 else
5747                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5748                 switch (ring_type) {
5749                 case HWRM_RING_ALLOC_TX:
5750                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5751                         break;
5752                 case HWRM_RING_ALLOC_RX:
5753                 case HWRM_RING_ALLOC_AGG:
5754                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5755                         break;
5756                 case HWRM_RING_ALLOC_CMPL:
5757                         db->db_key64 = DBR_PATH_L2;
5758                         break;
5759                 case HWRM_RING_ALLOC_NQ:
5760                         db->db_key64 = DBR_PATH_L2;
5761                         break;
5762                 }
5763                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5764         } else {
5765                 db->doorbell = bp->bar1 + map_idx * 0x80;
5766                 switch (ring_type) {
5767                 case HWRM_RING_ALLOC_TX:
5768                         db->db_key32 = DB_KEY_TX;
5769                         break;
5770                 case HWRM_RING_ALLOC_RX:
5771                 case HWRM_RING_ALLOC_AGG:
5772                         db->db_key32 = DB_KEY_RX;
5773                         break;
5774                 case HWRM_RING_ALLOC_CMPL:
5775                         db->db_key32 = DB_KEY_CP;
5776                         break;
5777                 }
5778         }
5779 }
5780
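/* Allocate all firmware rings in dependency order: NQs (or completion
 * rings on pre-P5 chips) first, then TX, RX and aggregation rings,
 * programming the doorbell for each ring as it is created.
 */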
5781 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5782 {
5783         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5784         int i, rc = 0;
5785         u32 type;
5786
5787         if (bp->flags & BNXT_FLAG_CHIP_P5)
5788                 type = HWRM_RING_ALLOC_NQ;
5789         else
5790                 type = HWRM_RING_ALLOC_CMPL;
5791         for (i = 0; i < bp->cp_nr_rings; i++) {
5792                 struct bnxt_napi *bnapi = bp->bnapi[i];
5793                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5794                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5795                 u32 map_idx = ring->map_idx;
5796                 unsigned int vector;
5797
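                /* Quiesce the IRQ while the NQ/completion ring is
                 * allocated, presumably so that no interrupt arrives on
                 * the new ring before its doorbell is initialized below.
                 */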
5798                 vector = bp->irq_tbl[map_idx].vector;
5799                 disable_irq_nosync(vector);
5800                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5801                 if (rc) {
5802                         enable_irq(vector);
5803                         goto err_out;
5804                 }
5805                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5806                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5807                 enable_irq(vector);
5808                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5809
5810                 if (!i) {
5811                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5812                         if (rc)
5813                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5814                 }
5815         }
5816
5817         type = HWRM_RING_ALLOC_TX;
5818         for (i = 0; i < bp->tx_nr_rings; i++) {
5819                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5820                 struct bnxt_ring_struct *ring;
5821                 u32 map_idx;
5822
5823                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5824                         struct bnxt_napi *bnapi = txr->bnapi;
5825                         struct bnxt_cp_ring_info *cpr, *cpr2;
5826                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5827
5828                         cpr = &bnapi->cp_ring;
5829                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5830                         ring = &cpr2->cp_ring_struct;
5831                         ring->handle = BNXT_TX_HDL;
5832                         map_idx = bnapi->index;
5833                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5834                         if (rc)
5835                                 goto err_out;
5836                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5837                                     ring->fw_ring_id);
5838                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5839                 }
5840                 ring = &txr->tx_ring_struct;
5841                 map_idx = i;
5842                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5843                 if (rc)
5844                         goto err_out;
5845                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5846         }
5847
5848         type = HWRM_RING_ALLOC_RX;
5849         for (i = 0; i < bp->rx_nr_rings; i++) {
5850                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5851                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5852                 struct bnxt_napi *bnapi = rxr->bnapi;
5853                 u32 map_idx = bnapi->index;
5854
5855                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5856                 if (rc)
5857                         goto err_out;
5858                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5859                 /* If we have agg rings, post agg buffers first. */
5860                 if (!agg_rings)
5861                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5862                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5863                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5864                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5865                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5866                         struct bnxt_cp_ring_info *cpr2;
5867
5868                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5869                         ring = &cpr2->cp_ring_struct;
5870                         ring->handle = BNXT_RX_HDL;
5871                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5872                         if (rc)
5873                                 goto err_out;
5874                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5875                                     ring->fw_ring_id);
5876                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5877                 }
5878         }
5879
5880         if (agg_rings) {
5881                 type = HWRM_RING_ALLOC_AGG;
5882                 for (i = 0; i < bp->rx_nr_rings; i++) {
5883                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5884                         struct bnxt_ring_struct *ring =
5885                                                 &rxr->rx_agg_ring_struct;
5886                         u32 grp_idx = ring->grp_idx;
5887                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5888
5889                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5890                         if (rc)
5891                                 goto err_out;
5892
5893                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5894                                     ring->fw_ring_id);
5895                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5896                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5897                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5898                 }
5899         }
5900 err_out:
5901         return rc;
5902 }
5903
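/* Send one HWRM_RING_FREE request and check for firmware errors.
 * Callers pass a valid cmpl_ring_id in the command header only when
 * closing the path gracefully, INVALID_HW_RING_ID otherwise.
 */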
5904 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5905                                    struct bnxt_ring_struct *ring,
5906                                    u32 ring_type, int cmpl_ring_id)
5907 {
5908         int rc;
5909         struct hwrm_ring_free_input req = {0};
5910         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5911         u16 error_code;
5912
5913         if (BNXT_NO_FW_ACCESS(bp))
5914                 return 0;
5915
5916         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5917         req.ring_type = ring_type;
5918         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5919
5920         mutex_lock(&bp->hwrm_cmd_lock);
5921         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5922         error_code = le16_to_cpu(resp->error_code);
5923         mutex_unlock(&bp->hwrm_cmd_lock);
5924
5925         if (rc || error_code) {
5926                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5927                            ring_type, rc, error_code);
5928                 return -EIO;
5929         }
5930         return 0;
5931 }
5932
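/* Free all firmware rings: TX, RX and aggregation rings first, then
 * the completion rings/NQs after interrupts have been synchronously
 * disabled (see the comment below).
 */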
5933 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5934 {
5935         u32 type;
5936         int i;
5937
5938         if (!bp->bnapi)
5939                 return;
5940
5941         for (i = 0; i < bp->tx_nr_rings; i++) {
5942                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5943                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5944
5945                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5946                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5947
5948                         hwrm_ring_free_send_msg(bp, ring,
5949                                                 RING_FREE_REQ_RING_TYPE_TX,
5950                                                 close_path ? cmpl_ring_id :
5951                                                 INVALID_HW_RING_ID);
5952                         ring->fw_ring_id = INVALID_HW_RING_ID;
5953                 }
5954         }
5955
5956         for (i = 0; i < bp->rx_nr_rings; i++) {
5957                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5958                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5959                 u32 grp_idx = rxr->bnapi->index;
5960
5961                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5962                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5963
5964                         hwrm_ring_free_send_msg(bp, ring,
5965                                                 RING_FREE_REQ_RING_TYPE_RX,
5966                                                 close_path ? cmpl_ring_id :
5967                                                 INVALID_HW_RING_ID);
5968                         ring->fw_ring_id = INVALID_HW_RING_ID;
5969                         bp->grp_info[grp_idx].rx_fw_ring_id =
5970                                 INVALID_HW_RING_ID;
5971                 }
5972         }
5973
5974         if (bp->flags & BNXT_FLAG_CHIP_P5)
5975                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5976         else
5977                 type = RING_FREE_REQ_RING_TYPE_RX;
5978         for (i = 0; i < bp->rx_nr_rings; i++) {
5979                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5980                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5981                 u32 grp_idx = rxr->bnapi->index;
5982
5983                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5984                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5985
5986                         hwrm_ring_free_send_msg(bp, ring, type,
5987                                                 close_path ? cmpl_ring_id :
5988                                                 INVALID_HW_RING_ID);
5989                         ring->fw_ring_id = INVALID_HW_RING_ID;
5990                         bp->grp_info[grp_idx].agg_fw_ring_id =
5991                                 INVALID_HW_RING_ID;
5992                 }
5993         }
5994
5995         /* The completion rings are about to be freed.  After that, the
5996          * IRQ doorbells will no longer work, so the IRQs must be
5997          * disabled here.
5998          */
5999         bnxt_disable_int_sync(bp);
6000
6001         if (bp->flags & BNXT_FLAG_CHIP_P5)
6002                 type = RING_FREE_REQ_RING_TYPE_NQ;
6003         else
6004                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6005         for (i = 0; i < bp->cp_nr_rings; i++) {
6006                 struct bnxt_napi *bnapi = bp->bnapi[i];
6007                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6008                 struct bnxt_ring_struct *ring;
6009                 int j;
6010
6011                 for (j = 0; j < 2; j++) {
6012                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6013
6014                         if (cpr2) {
6015                                 ring = &cpr2->cp_ring_struct;
6016                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6017                                         continue;
6018                                 hwrm_ring_free_send_msg(bp, ring,
6019                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
6020                                         INVALID_HW_RING_ID);
6021                                 ring->fw_ring_id = INVALID_HW_RING_ID;
6022                         }
6023                 }
6024                 ring = &cpr->cp_ring_struct;
6025                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6026                         hwrm_ring_free_send_msg(bp, ring, type,
6027                                                 INVALID_HW_RING_ID);
6028                         ring->fw_ring_id = INVALID_HW_RING_ID;
6029                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6030                 }
6031         }
6032 }
6033
6034 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6035                            bool shared);
6036
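/* Query the resources currently reserved for this function (rings,
 * ring groups, VNICs, stats contexts, IRQs) and record them in
 * bp->hw_resc.  On P5 chips, the RX/TX counts are trimmed to fit the
 * allocated completion rings.
 */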
6037 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6038 {
6039         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6040         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6041         struct hwrm_func_qcfg_input req = {0};
6042         int rc;
6043
6044         if (bp->hwrm_spec_code < 0x10601)
6045                 return 0;
6046
6047         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6048         req.fid = cpu_to_le16(0xffff);
6049         mutex_lock(&bp->hwrm_cmd_lock);
6050         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6051         if (rc) {
6052                 mutex_unlock(&bp->hwrm_cmd_lock);
6053                 return rc;
6054         }
6055
6056         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6057         if (BNXT_NEW_RM(bp)) {
6058                 u16 cp, stats;
6059
6060                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6061                 hw_resc->resv_hw_ring_grps =
6062                         le32_to_cpu(resp->alloc_hw_ring_grps);
6063                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6064                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6065                 stats = le16_to_cpu(resp->alloc_stat_ctx);
6066                 hw_resc->resv_irqs = cp;
6067                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6068                         int rx = hw_resc->resv_rx_rings;
6069                         int tx = hw_resc->resv_tx_rings;
6070
6071                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6072                                 rx >>= 1;
6073                         if (cp < (rx + tx)) {
6074                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6075                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6076                                         rx <<= 1;
6077                                 hw_resc->resv_rx_rings = rx;
6078                                 hw_resc->resv_tx_rings = tx;
6079                         }
6080                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6081                         hw_resc->resv_hw_ring_grps = rx;
6082                 }
6083                 hw_resc->resv_cp_rings = cp;
6084                 hw_resc->resv_stat_ctxs = stats;
6085         }
6086         mutex_unlock(&bp->hwrm_cmd_lock);
6087         return 0;
6088 }
6089
6090 /* Caller must hold bp->hwrm_cmd_lock */
6091 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6092 {
6093         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6094         struct hwrm_func_qcfg_input req = {0};
6095         int rc;
6096
6097         if (bp->hwrm_spec_code < 0x10601)
6098                 return 0;
6099
6100         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6101         req.fid = cpu_to_le16(fid);
6102         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6103         if (!rc)
6104                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6105
6106         return rc;
6107 }
6108
6109 static bool bnxt_rfs_supported(struct bnxt *bp);
6110
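/* Build a HWRM_FUNC_CFG request reserving the given numbers of rings,
 * ring groups, stats contexts and VNICs for the PF.  Only resources
 * with non-zero counts are marked in the enables bitmap.
 */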
6111 static void
6112 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6113                              int tx_rings, int rx_rings, int ring_grps,
6114                              int cp_rings, int stats, int vnics)
6115 {
6116         u32 enables = 0;
6117
6118         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6119         req->fid = cpu_to_le16(0xffff);
6120         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6121         req->num_tx_rings = cpu_to_le16(tx_rings);
6122         if (BNXT_NEW_RM(bp)) {
6123                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6124                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6125                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6126                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6127                         enables |= tx_rings + ring_grps ?
6128                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6129                         enables |= rx_rings ?
6130                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6131                 } else {
6132                         enables |= cp_rings ?
6133                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6134                         enables |= ring_grps ?
6135                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6136                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6137                 }
6138                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6139
6140                 req->num_rx_rings = cpu_to_le16(rx_rings);
6141                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6142                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6143                         req->num_msix = cpu_to_le16(cp_rings);
6144                         req->num_rsscos_ctxs =
6145                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6146                 } else {
6147                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6148                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6149                         req->num_rsscos_ctxs = cpu_to_le16(1);
6150                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6151                             bnxt_rfs_supported(bp))
6152                                 req->num_rsscos_ctxs =
6153                                         cpu_to_le16(ring_grps + 1);
6154                 }
6155                 req->num_stat_ctxs = cpu_to_le16(stats);
6156                 req->num_vnics = cpu_to_le16(vnics);
6157         }
6158         req->enables = cpu_to_le32(enables);
6159 }
6160
6161 static void
6162 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6163                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
6164                              int rx_rings, int ring_grps, int cp_rings,
6165                              int stats, int vnics)
6166 {
6167         u32 enables = 0;
6168
6169         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6170         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6171         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6172                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6173         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6174         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6175                 enables |= tx_rings + ring_grps ?
6176                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6177         } else {
6178                 enables |= cp_rings ?
6179                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6180                 enables |= ring_grps ?
6181                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6182         }
6183         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6184         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6185
6186         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6187         req->num_tx_rings = cpu_to_le16(tx_rings);
6188         req->num_rx_rings = cpu_to_le16(rx_rings);
6189         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6190                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6191                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6192         } else {
6193                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6194                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6195                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6196         }
6197         req->num_stat_ctxs = cpu_to_le16(stats);
6198         req->num_vnics = cpu_to_le16(vnics);
6199
6200         req->enables = cpu_to_le32(enables);
6201 }
6202
6203 static int
6204 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6205                            int ring_grps, int cp_rings, int stats, int vnics)
6206 {
6207         struct hwrm_func_cfg_input req = {0};
6208         int rc;
6209
6210         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6211                                      cp_rings, stats, vnics);
6212         if (!req.enables)
6213                 return 0;
6214
6215         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6216         if (rc)
6217                 return rc;
6218
6219         if (bp->hwrm_spec_code < 0x10601)
6220                 bp->hw_resc.resv_tx_rings = tx_rings;
6221
6222         return bnxt_hwrm_get_rings(bp);
6223 }
6224
6225 static int
6226 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6227                            int ring_grps, int cp_rings, int stats, int vnics)
6228 {
6229         struct hwrm_func_vf_cfg_input req = {0};
6230         int rc;
6231
6232         if (!BNXT_NEW_RM(bp)) {
6233                 bp->hw_resc.resv_tx_rings = tx_rings;
6234                 return 0;
6235         }
6236
6237         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6238                                      cp_rings, stats, vnics);
6239         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6240         if (rc)
6241                 return rc;
6242
6243         return bnxt_hwrm_get_rings(bp);
6244 }
6245
6246 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6247                                    int cp, int stat, int vnic)
6248 {
6249         if (BNXT_PF(bp))
6250                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6251                                                   vnic);
6252         else
6253                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6254                                                   vnic);
6255 }
6256
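/* Number of NQs (P5) or completion rings (older chips) in use,
 * including any MSI-X vectors set aside for the ULP (RDMA) driver.
 */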
6257 int bnxt_nq_rings_in_use(struct bnxt *bp)
6258 {
6259         int cp = bp->cp_nr_rings;
6260         int ulp_msix, ulp_base;
6261
6262         ulp_msix = bnxt_get_ulp_msix_num(bp);
6263         if (ulp_msix) {
6264                 ulp_base = bnxt_get_ulp_msix_base(bp);
6265                 cp += ulp_msix;
6266                 if ((ulp_base + ulp_msix) > cp)
6267                         cp = ulp_base + ulp_msix;
6268         }
6269         return cp;
6270 }
6271
6272 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6273 {
6274         int cp;
6275
6276         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6277                 return bnxt_nq_rings_in_use(bp);
6278
6279         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6280         return cp;
6281 }
6282
6283 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6284 {
6285         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6286         int cp = bp->cp_nr_rings;
6287
6288         if (!ulp_stat)
6289                 return cp;
6290
6291         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6292                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6293
6294         return cp + ulp_stat;
6295 }
6296
6297 /* Check if a default RSS map needs to be set up.  This function is only
6298  * used on older firmware that does not require reserving RX rings.
6299  */
6300 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6301 {
6302         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6303
6304         /* The RSS map is valid for RX rings set to resv_rx_rings */
6305         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6306                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6307                 if (!netif_is_rxfh_configured(bp->dev))
6308                         bnxt_set_dflt_rss_indir_tbl(bp);
6309         }
6310 }
6311
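/* Return true if the ring, VNIC or stats context requirements no
 * longer match what has been reserved in firmware.
 */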
6312 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6313 {
6314         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6315         int cp = bnxt_cp_rings_in_use(bp);
6316         int nq = bnxt_nq_rings_in_use(bp);
6317         int rx = bp->rx_nr_rings, stat;
6318         int vnic = 1, grp = rx;
6319
6320         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6321             bp->hwrm_spec_code >= 0x10601)
6322                 return true;
6323
6324         /* Old firmware does not need RX ring reservations but we still
6325          * need to setup a default RSS map when needed.  With new firmware
6326          * we go through RX ring reservations first and then set up the
6327          * RSS map for the successfully reserved RX rings when needed.
6328          */
6329         if (!BNXT_NEW_RM(bp)) {
6330                 bnxt_check_rss_tbl_no_rmgr(bp);
6331                 return false;
6332         }
6333         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6334                 vnic = rx + 1;
6335         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6336                 rx <<= 1;
6337         stat = bnxt_get_func_stat_ctxs(bp);
6338         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6339             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6340             (hw_resc->resv_hw_ring_grps != grp &&
6341              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6342                 return true;
6343         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6344             hw_resc->resv_irqs != nq)
6345                 return true;
6346         return false;
6347 }
6348
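/* Reconcile the driver's requested ring counts with what firmware
 * actually reserves.  On a partial reservation, the TX/RX/CP ring
 * counts are trimmed to the reserved amounts, and the default RSS
 * indirection table is rebuilt unless it was user-configured.
 */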
6349 static int __bnxt_reserve_rings(struct bnxt *bp)
6350 {
6351         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6352         int cp = bnxt_nq_rings_in_use(bp);
6353         int tx = bp->tx_nr_rings;
6354         int rx = bp->rx_nr_rings;
6355         int grp, rx_rings, rc;
6356         int vnic = 1, stat;
6357         bool sh = false;
6358
6359         if (!bnxt_need_reserve_rings(bp))
6360                 return 0;
6361
6362         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6363                 sh = true;
6364         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6365                 vnic = rx + 1;
6366         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6367                 rx <<= 1;
6368         grp = bp->rx_nr_rings;
6369         stat = bnxt_get_func_stat_ctxs(bp);
6370
6371         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6372         if (rc)
6373                 return rc;
6374
6375         tx = hw_resc->resv_tx_rings;
6376         if (BNXT_NEW_RM(bp)) {
6377                 rx = hw_resc->resv_rx_rings;
6378                 cp = hw_resc->resv_irqs;
6379                 grp = hw_resc->resv_hw_ring_grps;
6380                 vnic = hw_resc->resv_vnics;
6381                 stat = hw_resc->resv_stat_ctxs;
6382         }
6383
6384         rx_rings = rx;
6385         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6386                 if (rx >= 2) {
6387                         rx_rings = rx >> 1;
6388                 } else {
6389                         if (netif_running(bp->dev))
6390                                 return -ENOMEM;
6391
6392                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6393                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6394                         bp->dev->hw_features &= ~NETIF_F_LRO;
6395                         bp->dev->features &= ~NETIF_F_LRO;
6396                         bnxt_set_ring_params(bp);
6397                 }
6398         }
6399         rx_rings = min_t(int, rx_rings, grp);
6400         cp = min_t(int, cp, bp->cp_nr_rings);
6401         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6402                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6403         cp = min_t(int, cp, stat);
6404         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6405         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6406                 rx = rx_rings << 1;
6407         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6408         bp->tx_nr_rings = tx;
6409
6410         /* If we cannot reserve all the RX rings, reset the RSS map only
6411          * if absolutely necessary
6412          */
6413         if (rx_rings != bp->rx_nr_rings) {
6414                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6415                             rx_rings, bp->rx_nr_rings);
6416                 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6417                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6418                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6419                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6420                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6421                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6422                 }
6423         }
6424         bp->rx_nr_rings = rx_rings;
6425         bp->cp_nr_rings = cp;
6426
6427         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6428                 return -ENOMEM;
6429
6430         if (!netif_is_rxfh_configured(bp->dev))
6431                 bnxt_set_dflt_rss_indir_tbl(bp);
6432
6433         return rc;
6434 }
6435
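/* The "check" variants below use the *_ASSETS_TEST flags to ask
 * firmware whether the requested resources could be reserved, without
 * actually reserving them.
 */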
6436 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6437                                     int ring_grps, int cp_rings, int stats,
6438                                     int vnics)
6439 {
6440         struct hwrm_func_vf_cfg_input req = {0};
6441         u32 flags;
6442
6443         if (!BNXT_NEW_RM(bp))
6444                 return 0;
6445
6446         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6447                                      cp_rings, stats, vnics);
6448         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6449                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6450                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6451                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6452                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6453                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6454         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6455                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6456
6457         req.flags = cpu_to_le32(flags);
6458         return hwrm_send_message_silent(bp, &req, sizeof(req),
6459                                         HWRM_CMD_TIMEOUT);
6460 }
6461
6462 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6463                                     int ring_grps, int cp_rings, int stats,
6464                                     int vnics)
6465 {
6466         struct hwrm_func_cfg_input req = {0};
6467         u32 flags;
6468
6469         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6470                                      cp_rings, stats, vnics);
6471         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6472         if (BNXT_NEW_RM(bp)) {
6473                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6474                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6475                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6476                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6477                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6478                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6479                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6480                 else
6481                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6482         }
6483
6484         req.flags = cpu_to_le32(flags);
6485         return hwrm_send_message_silent(bp, &req, sizeof(req),
6486                                         HWRM_CMD_TIMEOUT);
6487 }
6488
6489 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6490                                  int ring_grps, int cp_rings, int stats,
6491                                  int vnics)
6492 {
6493         if (bp->hwrm_spec_code < 0x10801)
6494                 return 0;
6495
6496         if (BNXT_PF(bp))
6497                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6498                                                 ring_grps, cp_rings, stats,
6499                                                 vnics);
6500
6501         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6502                                         cp_rings, stats, vnics);
6503 }
6504
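/* Query the interrupt coalescing limits supported by firmware; on HWRM
 * specs older than 0x10902, fall back to fixed legacy limits.
 */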
6505 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6506 {
6507         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6508         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6509         struct hwrm_ring_aggint_qcaps_input req = {0};
6510         int rc;
6511
6512         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6513         coal_cap->num_cmpl_dma_aggr_max = 63;
6514         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6515         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6516         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6517         coal_cap->int_lat_tmr_min_max = 65535;
6518         coal_cap->int_lat_tmr_max_max = 65535;
6519         coal_cap->num_cmpl_aggr_int_max = 65535;
6520         coal_cap->timer_units = 80;
6521
6522         if (bp->hwrm_spec_code < 0x10902)
6523                 return;
6524
6525         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6526         mutex_lock(&bp->hwrm_cmd_lock);
6527         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6528         if (!rc) {
6529                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6530                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6531                 coal_cap->num_cmpl_dma_aggr_max =
6532                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6533                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6534                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6535                 coal_cap->cmpl_aggr_dma_tmr_max =
6536                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6537                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6538                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6539                 coal_cap->int_lat_tmr_min_max =
6540                         le16_to_cpu(resp->int_lat_tmr_min_max);
6541                 coal_cap->int_lat_tmr_max_max =
6542                         le16_to_cpu(resp->int_lat_tmr_max_max);
6543                 coal_cap->num_cmpl_aggr_int_max =
6544                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6545                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6546         }
6547         mutex_unlock(&bp->hwrm_cmd_lock);
6548 }
6549
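/* Convert microseconds to coalescing timer ticks.  timer_units appears
 * to be the tick period in nanoseconds (default 80).
 */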
6550 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6551 {
6552         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6553
6554         return usec * 1000 / coal_cap->timer_units;
6555 }
6556
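/* Fill a ring_cmpl_ring_cfg_aggint_params request from the generic
 * bnxt_coal settings, clamping each value to the limits discovered by
 * bnxt_hwrm_coal_params_qcaps().
 */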
6557 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6558         struct bnxt_coal *hw_coal,
6559         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6560 {
6561         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6562         u32 cmpl_params = coal_cap->cmpl_params;
6563         u16 val, tmr, max, flags = 0;
6564
6565         max = hw_coal->bufs_per_record * 128;
6566         if (hw_coal->budget)
6567                 max = hw_coal->bufs_per_record * hw_coal->budget;
6568         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6569
6570         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6571         req->num_cmpl_aggr_int = cpu_to_le16(val);
6572
6573         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6574         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6575
6576         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6577                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6578         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6579
6580         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6581         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6582         req->int_lat_tmr_max = cpu_to_le16(tmr);
6583
6584         /* min timer set to 1/2 of interrupt timer */
6585         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6586                 val = tmr / 2;
6587                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6588                 req->int_lat_tmr_min = cpu_to_le16(val);
6589                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6590         }
6591
6592         /* buf timer set to 1/4 of interrupt timer */
6593         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6594         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6595
6596         if (cmpl_params &
6597             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6598                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6599                 val = clamp_t(u16, tmr, 1,
6600                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6601                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6602                 req->enables |=
6603                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6604         }
6605
6606         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6607                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6608         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6609             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6610                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6611         req->flags = cpu_to_le16(flags);
6612         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6613 }
6614
6615 /* Caller holds bp->hwrm_cmd_lock */
6616 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6617                                    struct bnxt_coal *hw_coal)
6618 {
6619         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6620         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6621         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6622         u32 nq_params = coal_cap->nq_params;
6623         u16 tmr;
6624
6625         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6626                 return 0;
6627
6628         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6629                                -1, -1);
6630         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6631         req.flags =
6632                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6633
6634         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6635         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6636         req.int_lat_tmr_min = cpu_to_le16(tmr);
6637         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6638         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6639 }
6640
6641 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6642 {
6643         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6644         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6645         struct bnxt_coal coal;
6646
6647         /* Tick values in microseconds.
6648          * 1 coal_buf x bufs_per_record = 1 completion record.
6649          */
6650         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6651
6652         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6653         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6654
6655         if (!bnapi->rx_ring)
6656                 return -ENODEV;
6657
6658         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6659                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6660
6661         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6662
6663         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6664
6665         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6666                                  HWRM_CMD_TIMEOUT);
6667 }
6668
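/* Apply the global RX and TX coalescing settings to every completion
 * ring.  On P5 chips, a NAPI that has both an RX and a TX completion
 * ring gets each programmed separately, followed by the NQ min timer.
 */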
6669 int bnxt_hwrm_set_coal(struct bnxt *bp)
6670 {
6671         int i, rc = 0;
6672         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6673                                                            req_tx = {0}, *req;
6674
6675         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6676                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6677         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6678                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6679
6680         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6681         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6682
6683         mutex_lock(&bp->hwrm_cmd_lock);
6684         for (i = 0; i < bp->cp_nr_rings; i++) {
6685                 struct bnxt_napi *bnapi = bp->bnapi[i];
6686                 struct bnxt_coal *hw_coal;
6687                 u16 ring_id;
6688
6689                 req = &req_rx;
6690                 if (!bnapi->rx_ring) {
6691                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6692                         req = &req_tx;
6693                 } else {
6694                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6695                 }
6696                 req->ring_id = cpu_to_le16(ring_id);
6697
6698                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6699                                         HWRM_CMD_TIMEOUT);
6700                 if (rc)
6701                         break;
6702
6703                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6704                         continue;
6705
6706                 if (bnapi->rx_ring && bnapi->tx_ring) {
6707                         req = &req_tx;
6708                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6709                         req->ring_id = cpu_to_le16(ring_id);
6710                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6711                                                 HWRM_CMD_TIMEOUT);
6712                         if (rc)
6713                                 break;
6714                 }
6715                 if (bnapi->rx_ring)
6716                         hw_coal = &bp->rx_coal;
6717                 else
6718                         hw_coal = &bp->tx_coal;
6719                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6720         }
6721         mutex_unlock(&bp->hwrm_cmd_lock);
6722         return rc;
6723 }
6724
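/* Free all statistics contexts in firmware.  For firmware with major
 * version <= 20, the stats are explicitly cleared before the free.
 */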
6725 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6726 {
6727         struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6728         struct hwrm_stat_ctx_free_input req = {0};
6729         int i;
6730
6731         if (!bp->bnapi)
6732                 return;
6733
6734         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6735                 return;
6736
6737         bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6738         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6739
6740         mutex_lock(&bp->hwrm_cmd_lock);
6741         for (i = 0; i < bp->cp_nr_rings; i++) {
6742                 struct bnxt_napi *bnapi = bp->bnapi[i];
6743                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6744
6745                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6746                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6747                         if (BNXT_FW_MAJ(bp) <= 20) {
6748                                 req0.stat_ctx_id = req.stat_ctx_id;
6749                                 _hwrm_send_message(bp, &req0, sizeof(req0),
6750                                                    HWRM_CMD_TIMEOUT);
6751                         }
6752                         _hwrm_send_message(bp, &req, sizeof(req),
6753                                            HWRM_CMD_TIMEOUT);
6754
6755                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6756                 }
6757         }
6758         mutex_unlock(&bp->hwrm_cmd_lock);
6759 }
6760
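/* Allocate one statistics context per completion ring, pointing the
 * firmware at the ring's stats DMA buffer and setting the counter
 * update period from stats_coal_ticks (converted from usecs to ms).
 */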
6761 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6762 {
6763         int rc = 0, i;
6764         struct hwrm_stat_ctx_alloc_input req = {0};
6765         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6766
6767         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6768                 return 0;
6769
6770         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6771
6772         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6773         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6774
6775         mutex_lock(&bp->hwrm_cmd_lock);
6776         for (i = 0; i < bp->cp_nr_rings; i++) {
6777                 struct bnxt_napi *bnapi = bp->bnapi[i];
6778                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6779
6780                 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6781
6782                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6783                                         HWRM_CMD_TIMEOUT);
6784                 if (rc)
6785                         break;
6786
6787                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6788
6789                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6790         }
6791         mutex_unlock(&bp->hwrm_cmd_lock);
6792         return rc;
6793 }
6794
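/* Query the current function configuration: VF VLAN or PF registered
 * VF count, firmware DCBX/LLDP agent capabilities, multi-host and
 * NPAR partition state, bridge (EVB) mode, maximum configured MTU,
 * and the size of the L2 doorbell BAR.
 */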
6795 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6796 {
6797         struct hwrm_func_qcfg_input req = {0};
6798         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6799         u32 min_db_offset = 0;
6800         u16 flags;
6801         int rc;
6802
6803         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6804         req.fid = cpu_to_le16(0xffff);
6805         mutex_lock(&bp->hwrm_cmd_lock);
6806         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6807         if (rc)
6808                 goto func_qcfg_exit;
6809
6810 #ifdef CONFIG_BNXT_SRIOV
6811         if (BNXT_VF(bp)) {
6812                 struct bnxt_vf_info *vf = &bp->vf;
6813
6814                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6815         } else {
6816                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6817         }
6818 #endif
6819         flags = le16_to_cpu(resp->flags);
6820         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6821                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6822                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6823                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6824                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6825         }
6826         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6827                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6828         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6829                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6830
6831         switch (resp->port_partition_type) {
6832         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6833         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6834         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6835                 bp->port_partition_type = resp->port_partition_type;
6836                 break;
6837         }
6838         if (bp->hwrm_spec_code < 0x10707 ||
6839             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6840                 bp->br_mode = BRIDGE_MODE_VEB;
6841         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6842                 bp->br_mode = BRIDGE_MODE_VEPA;
6843         else
6844                 bp->br_mode = BRIDGE_MODE_UNDEF;
6845
6846         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6847         if (!bp->max_mtu)
6848                 bp->max_mtu = BNXT_MAX_MTU;
6849
6850         if (bp->db_size)
6851                 goto func_qcfg_exit;
6852
6853         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6854                 if (BNXT_PF(bp))
6855                         min_db_offset = DB_PF_OFFSET_P5;
6856                 else
6857                         min_db_offset = DB_VF_OFFSET_P5;
6858         }
6859         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6860                                  1024);
6861         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6862             bp->db_size <= min_db_offset)
6863                 bp->db_size = pci_resource_len(bp->pdev, 2);
6864
6865 func_qcfg_exit:
6866         mutex_unlock(&bp->hwrm_cmd_lock);
6867         return rc;
6868 }
6869
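/* Record the firmware-supplied context memory initializer.  Each bit
 * set in ctx_init_mask means the corresponding context type must be
 * initialized with init_val at the given offset (reported in 4-byte
 * units, hence the multiply by 4).  The per-type offsets are read
 * from consecutive fields in the response, except that the walk is
 * re-pointed at stat_init_offset because the stat/mrav offsets are
 * not contiguous with the earlier fields.
 */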
6870 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6871                         struct hwrm_func_backing_store_qcaps_output *resp)
6872 {
6873         struct bnxt_mem_init *mem_init;
6874         u16 init_mask;
6875         u8 init_val;
6876         u8 *offset;
6877         int i;
6878
6879         init_val = resp->ctx_kind_initializer;
6880         init_mask = le16_to_cpu(resp->ctx_init_mask);
6881         offset = &resp->qp_init_offset;
6882         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6883         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6884                 mem_init->init_val = init_val;
6885                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6886                 if (!init_mask)
6887                         continue;
6888                 if (i == BNXT_CTX_MEM_INIT_STAT)
6889                         offset = &resp->stat_init_offset;
6890                 if (init_mask & (1 << i))
6891                         mem_init->offset = *offset * 4;
6892                 else
6893                         mem_init->init_val = 0;
6894         }
6895         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6896         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6897         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6898         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6899         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6900         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6901 }
6902
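/* Query the firmware's backing store requirements (entry sizes and
 * entry counts for QP, SRQ, CQ, VNIC, stats, TQM, MRAV and TIM
 * context memory) and cache them in a newly allocated bp->ctx.
 * Only supported on HWRM 1.9.2+ PFs; errors from older firmware are
 * silently treated as "no backing store needed".
 */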
6903 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6904 {
6905         struct hwrm_func_backing_store_qcaps_input req = {0};
6906         struct hwrm_func_backing_store_qcaps_output *resp =
6907                 bp->hwrm_cmd_resp_addr;
6908         int rc;
6909
6910         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6911                 return 0;
6912
6913         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6914         mutex_lock(&bp->hwrm_cmd_lock);
6915         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6916         if (!rc) {
6917                 struct bnxt_ctx_pg_info *ctx_pg;
6918                 struct bnxt_ctx_mem_info *ctx;
6919                 int i, tqm_rings;
6920
6921                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6922                 if (!ctx) {
6923                         rc = -ENOMEM;
6924                         goto ctx_err;
6925                 }
6926                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6927                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6928                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6929                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6930                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6931                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6932                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6933                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6934                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6935                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6936                 ctx->vnic_max_vnic_entries =
6937                         le16_to_cpu(resp->vnic_max_vnic_entries);
6938                 ctx->vnic_max_ring_table_entries =
6939                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6940                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6941                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6942                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6943                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6944                 ctx->tqm_min_entries_per_ring =
6945                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6946                 ctx->tqm_max_entries_per_ring =
6947                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6948                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6949                 if (!ctx->tqm_entries_multiple)
6950                         ctx->tqm_entries_multiple = 1;
6951                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6952                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6953                 ctx->mrav_num_entries_units =
6954                         le16_to_cpu(resp->mrav_num_entries_units);
6955                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6956                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6957
6958                 bnxt_init_ctx_initializer(ctx, resp);
6959
6960                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6961                 if (!ctx->tqm_fp_rings_count)
6962                         ctx->tqm_fp_rings_count = bp->max_q;
6963                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6964                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6965
6966                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6967                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6968                 if (!ctx_pg) {
6969                         kfree(ctx);
6970                         rc = -ENOMEM;
6971                         goto ctx_err;
6972                 }
6973                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6974                         ctx->tqm_mem[i] = ctx_pg;
6975                 bp->ctx = ctx;
6976         } else {
6977                 rc = 0;
6978         }
6979 ctx_err:
6980         mutex_unlock(&bp->hwrm_cmd_lock);
6981         return rc;
6982 }
6983
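/* Encode the page size and indirection level of a context memory
 * region for a backing store request: the low attribute bits select
 * the level (0 = pages addressed directly, 1 or 2 = one or two
 * levels of page tables), and *pg_dir is set to the DMA address of
 * the first data page or of the top-level page table accordingly.
 */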
6984 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6985                                   __le64 *pg_dir)
6986 {
6987         if (!rmem->nr_pages)
6988                 return;
6989
6990         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6991         if (rmem->depth >= 1) {
6992                 if (rmem->depth == 2)
6993                         *pg_attr |= 2;
6994                 else
6995                         *pg_attr |= 1;
6996                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6997         } else {
6998                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6999         }
7000 }
7001
7002 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
7003         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
7004          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
7005          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
7006          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
7007          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7008
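/* Push the sized context memory regions to the firmware.  Each region
 * selected in 'enables' gets its entry count, entry size, page
 * attributes and page directory filled in; the TQM rings are walked
 * via the consecutive num_entries/pg_attr/pg_dir fields in the
 * request.  Older firmware that cannot accept the extended request
 * length is sent the legacy-sized request instead.
 */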
7009 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7010 {
7011         struct hwrm_func_backing_store_cfg_input req = {0};
7012         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7013         struct bnxt_ctx_pg_info *ctx_pg;
7014         u32 req_len = sizeof(req);
7015         __le32 *num_entries;
7016         __le64 *pg_dir;
7017         u32 flags = 0;
7018         u8 *pg_attr;
7019         u32 ena;
7020         int i;
7021
7022         if (!ctx)
7023                 return 0;
7024
7025         if (req_len > bp->hwrm_max_ext_req_len)
7026                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7027         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7028         req.enables = cpu_to_le32(enables);
7029
7030         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7031                 ctx_pg = &ctx->qp_mem;
7032                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
7033                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7034                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7035                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7036                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7037                                       &req.qpc_pg_size_qpc_lvl,
7038                                       &req.qpc_page_dir);
7039         }
7040         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7041                 ctx_pg = &ctx->srq_mem;
7042                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
7043                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7044                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7045                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7046                                       &req.srq_pg_size_srq_lvl,
7047                                       &req.srq_page_dir);
7048         }
7049         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7050                 ctx_pg = &ctx->cq_mem;
7051                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7052                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7053                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7054                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7055                                       &req.cq_page_dir);
7056         }
7057         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7058                 ctx_pg = &ctx->vnic_mem;
7059                 req.vnic_num_vnic_entries =
7060                         cpu_to_le16(ctx->vnic_max_vnic_entries);
7061                 req.vnic_num_ring_table_entries =
7062                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
7063                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7064                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7065                                       &req.vnic_pg_size_vnic_lvl,
7066                                       &req.vnic_page_dir);
7067         }
7068         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7069                 ctx_pg = &ctx->stat_mem;
7070                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7071                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7072                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7073                                       &req.stat_pg_size_stat_lvl,
7074                                       &req.stat_page_dir);
7075         }
7076         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7077                 ctx_pg = &ctx->mrav_mem;
7078                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7079                 if (ctx->mrav_num_entries_units)
7080                         flags |=
7081                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7082                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7083                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7084                                       &req.mrav_pg_size_mrav_lvl,
7085                                       &req.mrav_page_dir);
7086         }
7087         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7088                 ctx_pg = &ctx->tim_mem;
7089                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7090                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7091                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7092                                       &req.tim_pg_size_tim_lvl,
7093                                       &req.tim_page_dir);
7094         }
7095         for (i = 0, num_entries = &req.tqm_sp_num_entries,
7096              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7097              pg_dir = &req.tqm_sp_page_dir,
7098              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7099              i < BNXT_MAX_TQM_RINGS;
7100              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7101                 if (!(enables & ena))
7102                         continue;
7103
7104                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7105                 ctx_pg = ctx->tqm_mem[i];
7106                 *num_entries = cpu_to_le32(ctx_pg->entries);
7107                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7108         }
7109         req.flags = cpu_to_le32(flags);
7110         return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
7111 }
7112
7113 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7114                                   struct bnxt_ctx_pg_info *ctx_pg)
7115 {
7116         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7117
7118         rmem->page_size = BNXT_PAGE_SIZE;
7119         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7120         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7121         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7122         if (rmem->depth >= 1)
7123                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7124         return bnxt_alloc_ring(bp, rmem);
7125 }
7126
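/* Allocate the backing pages for one context memory region.  Regions
 * that fit within MAX_CTX_PAGES (and were not requested deeper) use
 * a single level of up to MAX_CTX_PAGES pages.  Larger regions use
 * two levels: a block of page-table pages, each of which maps up to
 * MAX_CTX_PAGES data pages, with the last table possibly partial.
 */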
7127 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7128                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7129                                   u8 depth, struct bnxt_mem_init *mem_init)
7130 {
7131         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7132         int rc;
7133
7134         if (!mem_size)
7135                 return -EINVAL;
7136
7137         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7138         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7139                 ctx_pg->nr_pages = 0;
7140                 return -EINVAL;
7141         }
7142         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7143                 int nr_tbls, i;
7144
7145                 rmem->depth = 2;
7146                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7147                                              GFP_KERNEL);
7148                 if (!ctx_pg->ctx_pg_tbl)
7149                         return -ENOMEM;
7150                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7151                 rmem->nr_pages = nr_tbls;
7152                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7153                 if (rc)
7154                         return rc;
7155                 for (i = 0; i < nr_tbls; i++) {
7156                         struct bnxt_ctx_pg_info *pg_tbl;
7157
7158                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7159                         if (!pg_tbl)
7160                                 return -ENOMEM;
7161                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7162                         rmem = &pg_tbl->ring_mem;
7163                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7164                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7165                         rmem->depth = 1;
7166                         rmem->nr_pages = MAX_CTX_PAGES;
7167                         rmem->mem_init = mem_init;
7168                         if (i == (nr_tbls - 1)) {
7169                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7170
7171                                 if (rem)
7172                                         rmem->nr_pages = rem;
7173                         }
7174                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7175                         if (rc)
7176                                 break;
7177                 }
7178         } else {
7179                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7180                 if (rmem->nr_pages > 1 || depth)
7181                         rmem->depth = 1;
7182                 rmem->mem_init = mem_init;
7183                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7184         }
7185         return rc;
7186 }
7187
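/* Free the pages of one context memory region, including the second
 * level page-table blocks if the region was allocated with depth 2.
 */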
7188 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7189                                   struct bnxt_ctx_pg_info *ctx_pg)
7190 {
7191         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7192
7193         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7194             ctx_pg->ctx_pg_tbl) {
7195                 int i, nr_tbls = rmem->nr_pages;
7196
7197                 for (i = 0; i < nr_tbls; i++) {
7198                         struct bnxt_ctx_pg_info *pg_tbl;
7199                         struct bnxt_ring_mem_info *rmem2;
7200
7201                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7202                         if (!pg_tbl)
7203                                 continue;
7204                         rmem2 = &pg_tbl->ring_mem;
7205                         bnxt_free_ring(bp, rmem2);
7206                         ctx_pg->ctx_pg_arr[i] = NULL;
7207                         kfree(pg_tbl);
7208                         ctx_pg->ctx_pg_tbl[i] = NULL;
7209                 }
7210                 kfree(ctx_pg->ctx_pg_tbl);
7211                 ctx_pg->ctx_pg_tbl = NULL;
7212         }
7213         bnxt_free_ring(bp, rmem);
7214         ctx_pg->nr_pages = 0;
7215 }
7216
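/* Free all context memory regions and clear the INITED flag; the TQM
 * array (fast-path rings plus one slow-path ring) is freed first.
 */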
7217 static void bnxt_free_ctx_mem(struct bnxt *bp)
7218 {
7219         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7220         int i;
7221
7222         if (!ctx)
7223                 return;
7224
7225         if (ctx->tqm_mem[0]) {
7226                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7227                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7228                 kfree(ctx->tqm_mem[0]);
7229                 ctx->tqm_mem[0] = NULL;
7230         }
7231
7232         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7233         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7234         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7235         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7236         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7237         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7238         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7239         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7240 }
7241
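/* Size and allocate all context memory regions and hand them to the
 * firmware.  When RoCE is supported (and this is not a kdump kernel),
 * extra QP/SRQ entries are reserved and the MRAV and TIM regions are
 * allocated as well.  TQM rings are sized from the L2 queue depths,
 * rounded to tqm_entries_multiple and clamped to the per-ring limits.
 */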
7242 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7243 {
7244         struct bnxt_ctx_pg_info *ctx_pg;
7245         struct bnxt_ctx_mem_info *ctx;
7246         struct bnxt_mem_init *init;
7247         u32 mem_size, ena, entries;
7248         u32 entries_sp, min;
7249         u32 num_mr, num_ah;
7250         u32 extra_srqs = 0;
7251         u32 extra_qps = 0;
7252         u8 pg_lvl = 1;
7253         int i, rc;
7254
7255         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7256         if (rc) {
7257                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7258                            rc);
7259                 return rc;
7260         }
7261         ctx = bp->ctx;
7262         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7263                 return 0;
7264
7265         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7266                 pg_lvl = 2;
7267                 extra_qps = 65536;
7268                 extra_srqs = 8192;
7269         }
7270
7271         ctx_pg = &ctx->qp_mem;
7272         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7273                           extra_qps;
7274         if (ctx->qp_entry_size) {
7275                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7276                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7277                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7278                 if (rc)
7279                         return rc;
7280         }
7281
7282         ctx_pg = &ctx->srq_mem;
7283         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7284         if (ctx->srq_entry_size) {
7285                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7286                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7287                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7288                 if (rc)
7289                         return rc;
7290         }
7291
7292         ctx_pg = &ctx->cq_mem;
7293         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7294         if (ctx->cq_entry_size) {
7295                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7296                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7297                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7298                 if (rc)
7299                         return rc;
7300         }
7301
7302         ctx_pg = &ctx->vnic_mem;
7303         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7304                           ctx->vnic_max_ring_table_entries;
7305         if (ctx->vnic_entry_size) {
7306                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7307                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7308                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7309                 if (rc)
7310                         return rc;
7311         }
7312
7313         ctx_pg = &ctx->stat_mem;
7314         ctx_pg->entries = ctx->stat_max_entries;
7315         if (ctx->stat_entry_size) {
7316                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7317                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7318                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7319                 if (rc)
7320                         return rc;
7321         }
7322
7323         ena = 0;
7324         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7325                 goto skip_rdma;
7326
7327         ctx_pg = &ctx->mrav_mem;
7328         /* 128K extra is needed to accommodate static AH context
7329          * allocation by f/w.
7330          */
7331         num_mr = 1024 * 256;
7332         num_ah = 1024 * 128;
7333         ctx_pg->entries = num_mr + num_ah;
7334         if (ctx->mrav_entry_size) {
7335                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7336                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7337                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7338                 if (rc)
7339                         return rc;
7340         }
7341         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
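        /* When the firmware reports a unit size, the entry count is
         * passed as a packed pair of unit counts: MR units in the
         * high 16 bits and AH units in the low 16 bits.
         */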
7342         if (ctx->mrav_num_entries_units)
7343                 ctx_pg->entries =
7344                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7345                          (num_ah / ctx->mrav_num_entries_units);
7346
7347         ctx_pg = &ctx->tim_mem;
7348         ctx_pg->entries = ctx->qp_mem.entries;
7349         if (ctx->tim_entry_size) {
7350                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7351                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7352                 if (rc)
7353                         return rc;
7354         }
7355         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7356
7357 skip_rdma:
7358         min = ctx->tqm_min_entries_per_ring;
7359         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7360                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7361         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7362         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7363         entries = roundup(entries, ctx->tqm_entries_multiple);
7364         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7365         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7366                 ctx_pg = ctx->tqm_mem[i];
7367                 ctx_pg->entries = i ? entries : entries_sp;
7368                 if (ctx->tqm_entry_size) {
7369                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7370                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7371                                                     NULL);
7372                         if (rc)
7373                                 return rc;
7374                 }
7375                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7376         }
7377         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7378         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7379         if (rc) {
7380                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7381                            rc);
7382                 return rc;
7383         }
7384         ctx->flags |= BNXT_CTX_FLAG_INITED;
7385         return 0;
7386 }
7387
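/* Query the min/max resources (completion/TX/RX rings, ring groups,
 * L2 contexts, VNICs, stats contexts, RSS contexts) available to this
 * function.  On P5 chips the NQ count is bounded by max_msix and the
 * ring group limit tracks the RX ring limit.  For the PF, also record
 * the firmware's VF resource reservation strategy.
 */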
7388 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7389 {
7390         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7391         struct hwrm_func_resource_qcaps_input req = {0};
7392         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7393         int rc;
7394
7395         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7396         req.fid = cpu_to_le16(0xffff);
7397
7398         mutex_lock(&bp->hwrm_cmd_lock);
7399         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7400                                        HWRM_CMD_TIMEOUT);
7401         if (rc)
7402                 goto hwrm_func_resc_qcaps_exit;
7403
7404         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7405         if (!all)
7406                 goto hwrm_func_resc_qcaps_exit;
7407
7408         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7409         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7410         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7411         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7412         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7413         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7414         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7415         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7416         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7417         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7418         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7419         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7420         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7421         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7422         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7423         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7424
7425         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7426                 u16 max_msix = le16_to_cpu(resp->max_msix);
7427
7428                 hw_resc->max_nqs = max_msix;
7429                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7430         }
7431
7432         if (BNXT_PF(bp)) {
7433                 struct bnxt_pf_info *pf = &bp->pf;
7434
7435                 pf->vf_resv_strategy =
7436                         le16_to_cpu(resp->vf_reservation_strategy);
7437                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7438                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7439         }
7440 hwrm_func_resc_qcaps_exit:
7441         mutex_unlock(&bp->hwrm_cmd_lock);
7442         return rc;
7443 }
7444
7445 /* bp->hwrm_cmd_lock already held. */
7446 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7447 {
7448         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7449         struct hwrm_port_mac_ptp_qcfg_input req = {0};
7450         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7451         u8 flags;
7452         int rc;
7453
7454         if (bp->hwrm_spec_code < 0x10801) {
7455                 rc = -ENODEV;
7456                 goto no_ptp;
7457         }
7458
7459         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
7460         req.port_id = cpu_to_le16(bp->pf.port_id);
7461         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7462         if (rc)
7463                 goto no_ptp;
7464
7465         flags = resp->flags;
7466         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7467                 rc = -ENODEV;
7468                 goto no_ptp;
7469         }
7470         if (!ptp) {
7471                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7472                 if (!ptp)
7473                         return -ENOMEM;
7474                 ptp->bp = bp;
7475                 bp->ptp_cfg = ptp;
7476         }
7477         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7478                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7479                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7480         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7481                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7482                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7483         } else {
7484                 rc = -ENODEV;
7485                 goto no_ptp;
7486         }
7487         return 0;
7488
7489 no_ptp:
7490         kfree(ptp);
7491         bp->ptp_cfg = NULL;
7492         return rc;
7493 }
7494
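/* Query function capabilities: RoCE v1/v2, PCIe/extended stats, hot
 * reset and error recovery support, TX push mode, and the absolute
 * resource maximums.  For the PF this also retrieves the MAC address,
 * port/VF ranges and flow table limits, and probes PTP support.
 */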
7495 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7496 {
7497         int rc = 0;
7498         struct hwrm_func_qcaps_input req = {0};
7499         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7500         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7501         u32 flags, flags_ext;
7502
7503         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7504         req.fid = cpu_to_le16(0xffff);
7505
7506         mutex_lock(&bp->hwrm_cmd_lock);
7507         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7508         if (rc)
7509                 goto hwrm_func_qcaps_exit;
7510
7511         flags = le32_to_cpu(resp->flags);
7512         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7513                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7514         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7515                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7516         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7517                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7518         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7519                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7520         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7521                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7522         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7523                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7524         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7525                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7526         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7527                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7528
7529         flags_ext = le32_to_cpu(resp->flags_ext);
7530         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7531                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7532
7533         bp->tx_push_thresh = 0;
7534         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7535             BNXT_FW_MAJ(bp) > 217)
7536                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7537
7538         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7539         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7540         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7541         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7542         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7543         if (!hw_resc->max_hw_ring_grps)
7544                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7545         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7546         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7547         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7548
7549         if (BNXT_PF(bp)) {
7550                 struct bnxt_pf_info *pf = &bp->pf;
7551
7552                 pf->fw_fid = le16_to_cpu(resp->fid);
7553                 pf->port_id = le16_to_cpu(resp->port_id);
7554                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7555                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7556                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7557                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7558                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7559                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7560                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7561                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7562                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7563                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7564                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7565                         bp->flags |= BNXT_FLAG_WOL_CAP;
7566                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
7567                         __bnxt_hwrm_ptp_qcfg(bp);
7568         } else {
7569 #ifdef CONFIG_BNXT_SRIOV
7570                 struct bnxt_vf_info *vf = &bp->vf;
7571
7572                 vf->fw_fid = le16_to_cpu(resp->fid);
7573                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7574 #endif
7575         }
7576
7577 hwrm_func_qcaps_exit:
7578         mutex_unlock(&bp->hwrm_cmd_lock);
7579         return rc;
7580 }
7581
7582 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7583
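/* Top-level capability query: function caps, queue configuration,
 * and (on HWRM 1.8.3+) context memory setup and resource-manager
 * capabilities.
 */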
7584 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7585 {
7586         int rc;
7587
7588         rc = __bnxt_hwrm_func_qcaps(bp);
7589         if (rc)
7590                 return rc;
7591         rc = bnxt_hwrm_queue_qportcfg(bp);
7592         if (rc) {
7593                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7594                 return rc;
7595         }
7596         if (bp->hwrm_spec_code >= 0x10803) {
7597                 rc = bnxt_alloc_ctx_mem(bp);
7598                 if (rc)
7599                         return rc;
7600                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7601                 if (!rc)
7602                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7603         }
7604         return 0;
7605 }
7606
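/* Query advanced flow management capabilities; currently only the
 * RFS ring table index v2 capability is recorded.
 */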
7607 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7608 {
7609         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7610         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7611         int rc = 0;
7612         u32 flags;
7613
7614         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7615                 return 0;
7616
7617         resp = bp->hwrm_cmd_resp_addr;
7618         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7619
7620         mutex_lock(&bp->hwrm_cmd_lock);
7621         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7622         if (rc)
7623                 goto hwrm_cfa_adv_qcaps_exit;
7624
7625         flags = le32_to_cpu(resp->flags);
7626         if (flags &
7627             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7628                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7629
7630 hwrm_cfa_adv_qcaps_exit:
7631         mutex_unlock(&bp->hwrm_cmd_lock);
7632         return rc;
7633 }
7634
7635 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7636 {
7637         if (bp->fw_health)
7638                 return 0;
7639
7640         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7641         if (!bp->fw_health)
7642                 return -ENOMEM;
7643
7644         return 0;
7645 }
7646
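/* Allocate bp->fw_health if hot reset or error recovery is supported.
 * If the allocation fails, clear both capability flags so that no
 * code path tries to use the missing structure.
 */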
7647 static int bnxt_alloc_fw_health(struct bnxt *bp)
7648 {
7649         int rc;
7650
7651         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7652             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7653                 return 0;
7654
7655         rc = __bnxt_alloc_fw_health(bp);
7656         if (rc) {
7657                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7658                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7659                 return rc;
7660         }
7661
7662         return 0;
7663 }
7664
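/* Aim the dedicated health-monitoring window in BAR0 at the GRC
 * address block containing 'reg'.
 */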
7665 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7666 {
7667         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7668                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7669                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7670 }
7671
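/* Return false only if the firmware status register is readable and
 * reports an unhealthy state; otherwise assume the firmware is fine.
 */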
7672 bool bnxt_is_fw_healthy(struct bnxt *bp)
7673 {
7674         if (bp->fw_health && bp->fw_health->status_reliable) {
7675                 u32 fw_status;
7676
7677                 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7678                 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7679                         return false;
7680         }
7681
7682         return true;
7683 }
7684
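/* Mark a GRC-mapped health status register as unreliable (e.g. after
 * a reset, when the BAR0 window mapping may no longer be valid).
 */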
7685 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7686 {
7687         struct bnxt_fw_health *fw_health = bp->fw_health;
7688         u32 reg_type;
7689
7690         if (!fw_health || !fw_health->status_reliable)
7691                 return;
7692
7693         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7694         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7695                 fw_health->status_reliable = false;
7696 }
7697
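/* Locate the firmware health status register.  The hcomm status
 * structure is searched for by signature through a GRC window in
 * BAR0; if it is absent, P5 chips fall back to a fixed GRC status
 * location.  On success, record the (possibly window-mapped) register
 * and mark the health status as reliable.
 */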
7698 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7699 {
7700         void __iomem *hs;
7701         u32 status_loc;
7702         u32 reg_type;
7703         u32 sig;
7704
7705         if (bp->fw_health)
7706                 bp->fw_health->status_reliable = false;
7707
7708         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7709         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7710
7711         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7712         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7713                 if (!bp->chip_num) {
7714                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7715                         bp->chip_num = readl(bp->bar0 +
7716                                              BNXT_FW_HEALTH_WIN_BASE +
7717                                              BNXT_GRC_REG_CHIP_NUM);
7718                 }
7719                 if (!BNXT_CHIP_P5(bp))
7720                         return;
7721
7722                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7723                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7724         } else {
7725                 status_loc = readl(hs + offsetof(struct hcomm_status,
7726                                                  fw_status_loc));
7727         }
7728
7729         if (__bnxt_alloc_fw_health(bp)) {
7730                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7731                 return;
7732         }
7733
7734         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7735         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7736         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7737                 __bnxt_map_fw_health_reg(bp, status_loc);
7738                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7739                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7740         }
7741
7742         bp->fw_health->status_reliable = true;
7743 }
7744
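/* Pre-map the GRC-based error recovery registers through a dedicated
 * BAR0 window.  All four monitored registers must share one GRC
 * window base; otherwise the mapping fails with -ERANGE.
 */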
7745 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7746 {
7747         struct bnxt_fw_health *fw_health = bp->fw_health;
7748         u32 reg_base = 0xffffffff;
7749         int i;
7750
7751         bp->fw_health->status_reliable = false;
7752         /* Only pre-map the monitoring GRC registers using window 3 */
7753         for (i = 0; i < 4; i++) {
7754                 u32 reg = fw_health->regs[i];
7755
7756                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7757                         continue;
7758                 if (reg_base == 0xffffffff)
7759                         reg_base = reg & BNXT_GRC_BASE_MASK;
7760                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7761                         return -ERANGE;
7762                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7763         }
7764         bp->fw_health->status_reliable = true;
7765         if (reg_base == 0xffffffff)
7766                 return 0;
7767
7768         __bnxt_map_fw_health_reg(bp, reg_base);
7769         return 0;
7770 }
7771
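/* Fetch the firmware error recovery parameters: polling frequency and
 * wait periods (in deciseconds), the health/heartbeat/reset-count and
 * reset-in-progress register locations, and the sequence of register
 * writes (with per-step delays) used to reset the chip.  CO_CPU based
 * recovery is rejected when the Kong mailbox channel is unavailable.
 */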
7772 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7773 {
7774         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7775         struct bnxt_fw_health *fw_health = bp->fw_health;
7776         struct hwrm_error_recovery_qcfg_input req = {0};
7777         int rc, i;
7778
7779         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7780                 return 0;
7781
7782         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7783         mutex_lock(&bp->hwrm_cmd_lock);
7784         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7785         if (rc)
7786                 goto err_recovery_out;
7787         fw_health->flags = le32_to_cpu(resp->flags);
7788         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7789             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7790                 rc = -EINVAL;
7791                 goto err_recovery_out;
7792         }
7793         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7794         fw_health->master_func_wait_dsecs =
7795                 le32_to_cpu(resp->master_func_wait_period);
7796         fw_health->normal_func_wait_dsecs =
7797                 le32_to_cpu(resp->normal_func_wait_period);
7798         fw_health->post_reset_wait_dsecs =
7799                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7800         fw_health->post_reset_max_wait_dsecs =
7801                 le32_to_cpu(resp->max_bailout_time_after_reset);
7802         fw_health->regs[BNXT_FW_HEALTH_REG] =
7803                 le32_to_cpu(resp->fw_health_status_reg);
7804         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7805                 le32_to_cpu(resp->fw_heartbeat_reg);
7806         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7807                 le32_to_cpu(resp->fw_reset_cnt_reg);
7808         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7809                 le32_to_cpu(resp->reset_inprogress_reg);
7810         fw_health->fw_reset_inprog_reg_mask =
7811                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7812         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7813         if (fw_health->fw_reset_seq_cnt >= 16) {
7814                 rc = -EINVAL;
7815                 goto err_recovery_out;
7816         }
7817         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7818                 fw_health->fw_reset_seq_regs[i] =
7819                         le32_to_cpu(resp->reset_reg[i]);
7820                 fw_health->fw_reset_seq_vals[i] =
7821                         le32_to_cpu(resp->reset_reg_val[i]);
7822                 fw_health->fw_reset_seq_delay_msec[i] =
7823                         resp->delay_after_reset[i];
7824         }
7825 err_recovery_out:
7826         mutex_unlock(&bp->hwrm_cmd_lock);
7827         if (!rc)
7828                 rc = bnxt_map_fw_health_regs(bp);
7829         if (rc)
7830                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7831         return rc;
7832 }
7833
7834 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7835 {
7836         struct hwrm_func_reset_input req = {0};
7837
7838         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7839         req.enables = 0;
7840
7841         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7842 }
7843
7844 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7845 {
7846         struct hwrm_nvm_get_dev_info_output nvm_info;
7847
7848         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7849                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7850                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7851                          nvm_info.nvm_cfg_ver_upd);
7852 }
7853
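/* Query the hardware queue (TC) configuration and build the queue ID
 * and profile tables.  Queues with the CNP profile are reserved for
 * RoCE congestion handling and excluded from the L2 TC count unless
 * RDMA is not supported on a PF.  Asymmetric queue configurations are
 * limited to a single TC.
 */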
7854 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7855 {
7856         int rc = 0;
7857         struct hwrm_queue_qportcfg_input req = {0};
7858         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7859         u8 i, j, *qptr;
7860         bool no_rdma;
7861
7862         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7863
7864         mutex_lock(&bp->hwrm_cmd_lock);
7865         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7866         if (rc)
7867                 goto qportcfg_exit;
7868
7869         if (!resp->max_configurable_queues) {
7870                 rc = -EINVAL;
7871                 goto qportcfg_exit;
7872         }
7873         bp->max_tc = resp->max_configurable_queues;
7874         bp->max_lltc = resp->max_configurable_lossless_queues;
7875         if (bp->max_tc > BNXT_MAX_QUEUE)
7876                 bp->max_tc = BNXT_MAX_QUEUE;
7877
7878         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7879         qptr = &resp->queue_id0;
7880         for (i = 0, j = 0; i < bp->max_tc; i++) {
7881                 bp->q_info[j].queue_id = *qptr;
7882                 bp->q_ids[i] = *qptr++;
7883                 bp->q_info[j].queue_profile = *qptr++;
7884                 bp->tc_to_qidx[j] = j;
7885                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7886                     (no_rdma && BNXT_PF(bp)))
7887                         j++;
7888         }
7889         bp->max_q = bp->max_tc;
7890         bp->max_tc = max_t(u8, j, 1);
7891
7892         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7893                 bp->max_tc = 1;
7894
7895         if (bp->max_lltc > bp->max_tc)
7896                 bp->max_lltc = bp->max_tc;
7897
7898 qportcfg_exit:
7899         mutex_unlock(&bp->hwrm_cmd_lock);
7900         return rc;
7901 }
7902
7903 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7904 {
7905         struct hwrm_ver_get_input req = {0};
7906         int rc;
7907
7908         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7909         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7910         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7911         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7912
7913         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7914                                    silent);
7915         return rc;
7916 }
7917
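/* Retrieve and cache the firmware/HWRM version information: the
 * interface spec version, the firmware version string (with package
 * name when available), the default command timeout, maximum request
 * lengths, chip number/revision, and the device capability flags
 * (short commands, Kong channel, 64-bit flow handles, trusted VFs,
 * advanced flow management).
 */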
7918 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7919 {
7920         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7921         u16 fw_maj, fw_min, fw_bld, fw_rsv;
7922         u32 dev_caps_cfg, hwrm_ver;
7923         int rc, len;
7924
7925         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7926         mutex_lock(&bp->hwrm_cmd_lock);
7927         rc = __bnxt_hwrm_ver_get(bp, false);
7928         if (rc)
7929                 goto hwrm_ver_get_exit;
7930
7931         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7932
7933         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7934                              resp->hwrm_intf_min_8b << 8 |
7935                              resp->hwrm_intf_upd_8b;
7936         if (resp->hwrm_intf_maj_8b < 1) {
7937                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7938                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7939                             resp->hwrm_intf_upd_8b);
7940                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7941         }
7942
7943         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7944                         HWRM_VERSION_UPDATE;
7945
7946         if (bp->hwrm_spec_code > hwrm_ver)
7947                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7948                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7949                          HWRM_VERSION_UPDATE);
7950         else
7951                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7952                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7953                          resp->hwrm_intf_upd_8b);
7954
7955         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7956         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7957                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7958                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7959                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7960                 len = FW_VER_STR_LEN;
7961         } else {
7962                 fw_maj = resp->hwrm_fw_maj_8b;
7963                 fw_min = resp->hwrm_fw_min_8b;
7964                 fw_bld = resp->hwrm_fw_bld_8b;
7965                 fw_rsv = resp->hwrm_fw_rsvd_8b;
7966                 len = BC_HWRM_STR_LEN;
7967         }
7968         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7969         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7970                  fw_rsv);
7971
7972         if (strlen(resp->active_pkg_name)) {
7973                 int fw_ver_len = strlen(bp->fw_ver_str);
7974
7975                 snprintf(bp->fw_ver_str + fw_ver_len,
7976                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7977                          resp->active_pkg_name);
7978                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7979         }
7980
7981         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7982         if (!bp->hwrm_cmd_timeout)
7983                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7984
7985         if (resp->hwrm_intf_maj_8b >= 1) {
7986                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7987                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7988         }
7989         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7990                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7991
7992         bp->chip_num = le16_to_cpu(resp->chip_num);
7993         bp->chip_rev = resp->chip_rev;
7994         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7995             !resp->chip_metal)
7996                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7997
7998         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7999         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8000             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8001                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8002
8003         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8004                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8005
8006         if (dev_caps_cfg &
8007             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8008                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8009
8010         if (dev_caps_cfg &
8011             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8012                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8013
8014         if (dev_caps_cfg &
8015             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8016                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8017
8018 hwrm_ver_get_exit:
8019         mutex_unlock(&bp->hwrm_cmd_lock);
8020         return rc;
8021 }
8022
8023 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8024 {
8025         struct hwrm_fw_set_time_input req = {0};
8026         struct tm tm;
8027         time64_t now = ktime_get_real_seconds();
8028
8029         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8030             bp->hwrm_spec_code < 0x10400)
8031                 return -EOPNOTSUPP;
8032
8033         time64_to_tm(now, 0, &tm);
8034         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
8035         req.year = cpu_to_le16(1900 + tm.tm_year);
8036         req.month = 1 + tm.tm_mon;
8037         req.day = tm.tm_mday;
8038         req.hour = tm.tm_hour;
8039         req.minute = tm.tm_min;
8040         req.second = tm.tm_sec;
8041         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8042 }
8043
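/* Fold a hardware counter of 'mask' width into the full 64-bit software
 * counter.  If the masked hardware value went backwards, the hardware
 * counter wrapped, so add one full period (mask + 1).
 */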
8044 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8045 {
8046         u64 sw_tmp;
8047
8048         hw &= mask;
8049         sw_tmp = (*sw & ~mask) | hw;
8050         if (hw < (*sw & mask))
8051                 sw_tmp += mask + 1;
8052         WRITE_ONCE(*sw, sw_tmp);
8053 }
8054
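/* Accumulate 'count' hardware counters into the software mirror.  A mask
 * of -1 marks a full-width counter that can be copied directly; anything
 * narrower goes through the wraparound-aware helper above.
 */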
8055 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8056                                     int count, bool ignore_zero)
8057 {
8058         int i;
8059
8060         for (i = 0; i < count; i++) {
8061                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8062
8063                 if (ignore_zero && !hw)
8064                         continue;
8065
8066                 if (masks[i] == -1ULL)
8067                         sw_stats[i] = hw;
8068                 else
8069                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8070         }
8071 }
8072
8073 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8074 {
8075         if (!stats->hw_stats)
8076                 return;
8077
8078         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8079                                 stats->hw_masks, stats->len / 8, false);
8080 }
8081
8082 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8083 {
8084         struct bnxt_stats_mem *ring0_stats;
8085         bool ignore_zero = false;
8086         int i;
8087
8088         /* Chip bug.  Counter intermittently becomes 0. */
8089         if (bp->flags & BNXT_FLAG_CHIP_P5)
8090                 ignore_zero = true;
8091
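        /* every ring shares the counter masks recorded for ring 0 */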
8092         for (i = 0; i < bp->cp_nr_rings; i++) {
8093                 struct bnxt_napi *bnapi = bp->bnapi[i];
8094                 struct bnxt_cp_ring_info *cpr;
8095                 struct bnxt_stats_mem *stats;
8096
8097                 cpr = &bnapi->cp_ring;
8098                 stats = &cpr->stats;
8099                 if (!i)
8100                         ring0_stats = stats;
8101                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8102                                         ring0_stats->hw_masks,
8103                                         ring0_stats->len / 8, ignore_zero);
8104         }
8105         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8106                 struct bnxt_stats_mem *stats = &bp->port_stats;
8107                 __le64 *hw_stats = stats->hw_stats;
8108                 u64 *sw_stats = stats->sw_stats;
8109                 u64 *masks = stats->hw_masks;
8110                 int cnt;
8111
8112                 cnt = sizeof(struct rx_port_stats) / 8;
8113                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8114
8115                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8116                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8117                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8118                 cnt = sizeof(struct tx_port_stats) / 8;
8119                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8120         }
8121         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8122                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8123                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8124         }
8125 }
8126
8127 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8128 {
8129         struct bnxt_pf_info *pf = &bp->pf;
8130         struct hwrm_port_qstats_input req = {0};
8131
8132         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8133                 return 0;
8134
8135         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8136                 return -EOPNOTSUPP;
8137
8138         req.flags = flags;
8139         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8140         req.port_id = cpu_to_le16(pf->port_id);
8141         req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8142                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8143         req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8144         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8145 }
8146
8147 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8148 {
8149         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
8150         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
8151         struct hwrm_port_qstats_ext_input req = {0};
8152         struct bnxt_pf_info *pf = &bp->pf;
8153         u32 tx_stat_size;
8154         int rc;
8155
8156         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8157                 return 0;
8158
8159         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8160                 return -EOPNOTSUPP;
8161
8162         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
8163         req.flags = flags;
8164         req.port_id = cpu_to_le16(pf->port_id);
8165         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8166         req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8167         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8168                        sizeof(struct tx_port_stats_ext) : 0;
8169         req.tx_stat_size = cpu_to_le16(tx_stat_size);
8170         req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8171         mutex_lock(&bp->hwrm_cmd_lock);
8172         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8173         if (!rc) {
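                /* stat sizes are returned in bytes; convert to the
                 * number of 64-bit counters
                 */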
8174                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
8175                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8176                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
8177         } else {
8178                 bp->fw_rx_stats_ext_size = 0;
8179                 bp->fw_tx_stats_ext_size = 0;
8180         }
8181         if (flags)
8182                 goto qstats_done;
8183
8184         if (bp->fw_tx_stats_ext_size <=
8185             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8186                 mutex_unlock(&bp->hwrm_cmd_lock);
8187                 bp->pri2cos_valid = 0;
8188                 return rc;
8189         }
8190
8191         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8192         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8193
8194         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8195         if (!rc) {
8196                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8197                 u8 *pri2cos;
8198                 int i, j;
8199
8200                 resp2 = bp->hwrm_cmd_resp_addr;
8201                 pri2cos = &resp2->pri0_cos_queue_id;
8202                 for (i = 0; i < 8; i++) {
8203                         u8 queue_id = pri2cos[i];
8204                         u8 queue_idx;
8205
8206                         /* Per port queue IDs start from 0, 10, 20, etc */
8207                         queue_idx = queue_id % 10;
8208                         if (queue_idx > BNXT_MAX_QUEUE) {
8209                                 bp->pri2cos_valid = false;
8210                                 goto qstats_done;
8211                         }
8212                         for (j = 0; j < bp->max_q; j++) {
8213                                 if (bp->q_ids[j] == queue_id)
8214                                         bp->pri2cos_idx[i] = queue_idx;
8215                         }
8216                 }
8217                 bp->pri2cos_valid = 1;
8218         }
8219 qstats_done:
8220         mutex_unlock(&bp->hwrm_cmd_lock);
8221         return rc;
8222 }
8223
8224 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8225 {
8226         if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8227                 bnxt_hwrm_tunnel_dst_port_free(
8228                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8229         if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8230                 bnxt_hwrm_tunnel_dst_port_free(
8231                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8232 }
8233
8234 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8235 {
8236         int rc, i;
8237         u32 tpa_flags = 0;
8238
8239         if (set_tpa)
8240                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8241         else if (BNXT_NO_FW_ACCESS(bp))
8242                 return 0;
8243         for (i = 0; i < bp->nr_vnics; i++) {
8244                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8245                 if (rc) {
8246                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
8247                                    i, rc);
8248                         return rc;
8249                 }
8250         }
8251         return 0;
8252 }
8253
8254 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8255 {
8256         int i;
8257
8258         for (i = 0; i < bp->nr_vnics; i++)
8259                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8260 }
8261
8262 static void bnxt_clear_vnic(struct bnxt *bp)
8263 {
8264         if (!bp->vnic_info)
8265                 return;
8266
8267         bnxt_hwrm_clear_vnic_filter(bp);
8268         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8269                 /* clear all RSS settings before freeing the vnic ctx */
8270                 bnxt_hwrm_clear_vnic_rss(bp);
8271                 bnxt_hwrm_vnic_ctx_free(bp);
8272         }
8273         /* before freeing the vnic, undo the vnic TPA settings */
8274         if (bp->flags & BNXT_FLAG_TPA)
8275                 bnxt_set_tpa(bp, false);
8276         bnxt_hwrm_vnic_free(bp);
8277         if (bp->flags & BNXT_FLAG_CHIP_P5)
8278                 bnxt_hwrm_vnic_ctx_free(bp);
8279 }
8280
8281 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8282                                     bool irq_re_init)
8283 {
8284         bnxt_clear_vnic(bp);
8285         bnxt_hwrm_ring_free(bp, close_path);
8286         bnxt_hwrm_ring_grp_free(bp);
8287         if (irq_re_init) {
8288                 bnxt_hwrm_stat_ctx_free(bp);
8289                 bnxt_hwrm_free_tunnel_ports(bp);
8290         }
8291 }
8292
8293 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8294 {
8295         struct hwrm_func_cfg_input req = {0};
8296
8297         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8298         req.fid = cpu_to_le16(0xffff);
8299         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8300         if (br_mode == BRIDGE_MODE_VEB)
8301                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8302         else if (br_mode == BRIDGE_MODE_VEPA)
8303                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8304         else
8305                 return -EINVAL;
8306         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8307 }
8308
8309 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8310 {
8311         struct hwrm_func_cfg_input req = {0};
8312
8313         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8314                 return 0;
8315
8316         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8317         req.fid = cpu_to_le16(0xffff);
8318         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8319         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8320         if (size == 128)
8321                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8322
8323         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8324 }
8325
8326 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8327 {
8328         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8329         int rc;
8330
8331         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8332                 goto skip_rss_ctx;
8333
8334         /* allocate context for vnic */
8335         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8336         if (rc) {
8337                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8338                            vnic_id, rc);
8339                 goto vnic_setup_err;
8340         }
8341         bp->rsscos_nr_ctxs++;
8342
8343         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8344                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8345                 if (rc) {
8346                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8347                                    vnic_id, rc);
8348                         goto vnic_setup_err;
8349                 }
8350                 bp->rsscos_nr_ctxs++;
8351         }
8352
8353 skip_rss_ctx:
8354         /* configure default vnic, ring grp */
8355         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8356         if (rc) {
8357                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8358                            vnic_id, rc);
8359                 goto vnic_setup_err;
8360         }
8361
8362         /* Enable RSS hashing on vnic */
8363         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8364         if (rc) {
8365                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8366                            vnic_id, rc);
8367                 goto vnic_setup_err;
8368         }
8369
8370         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8371                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8372                 if (rc) {
8373                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8374                                    vnic_id, rc);
8375                 }
8376         }
8377
8378 vnic_setup_err:
8379         return rc;
8380 }
8381
8382 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8383 {
8384         int rc, i, nr_ctxs;
8385
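        /* allocate every RSS context the RX rings need before enabling RSS */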
8386         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8387         for (i = 0; i < nr_ctxs; i++) {
8388                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8389                 if (rc) {
8390                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8391                                    vnic_id, i, rc);
8392                         break;
8393                 }
8394                 bp->rsscos_nr_ctxs++;
8395         }
8396         if (i < nr_ctxs)
8397                 return -ENOMEM;
8398
8399         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8400         if (rc) {
8401                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8402                            vnic_id, rc);
8403                 return rc;
8404         }
8405         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8406         if (rc) {
8407                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8408                            vnic_id, rc);
8409                 return rc;
8410         }
8411         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8412                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8413                 if (rc) {
8414                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8415                                    vnic_id, rc);
8416                 }
8417         }
8418         return rc;
8419 }
8420
8421 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8422 {
8423         if (bp->flags & BNXT_FLAG_CHIP_P5)
8424                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8425         else
8426                 return __bnxt_setup_vnic(bp, vnic_id);
8427 }
8428
8429 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8430 {
8431 #ifdef CONFIG_RFS_ACCEL
8432         int i, rc = 0;
8433
8434         if (bp->flags & BNXT_FLAG_CHIP_P5)
8435                 return 0;
8436
8437         for (i = 0; i < bp->rx_nr_rings; i++) {
8438                 struct bnxt_vnic_info *vnic;
8439                 u16 vnic_id = i + 1;
8440                 u16 ring_id = i;
8441
8442                 if (vnic_id >= bp->nr_vnics)
8443                         break;
8444
8445                 vnic = &bp->vnic_info[vnic_id];
8446                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8447                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8448                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8449                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8450                 if (rc) {
8451                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8452                                    vnic_id, rc);
8453                         break;
8454                 }
8455                 rc = bnxt_setup_vnic(bp, vnic_id);
8456                 if (rc)
8457                         break;
8458         }
8459         return rc;
8460 #else
8461         return 0;
8462 #endif
8463 }
8464
8465 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8466 static bool bnxt_promisc_ok(struct bnxt *bp)
8467 {
8468 #ifdef CONFIG_BNXT_SRIOV
8469         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8470                 return false;
8471 #endif
8472         return true;
8473 }
8474
8475 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8476 {
8477         int rc;
8478
8479         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8480         if (rc) {
8481                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8482                            rc);
8483                 return rc;
8484         }
8485
8486         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8487         if (rc) {
8488                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8489                            rc);
8490                 return rc;
8491         }
8492         return rc;
8493 }
8494
8495 static int bnxt_cfg_rx_mode(struct bnxt *);
8496 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8497
8498 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8499 {
8500         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8501         int rc = 0;
8502         unsigned int rx_nr_rings = bp->rx_nr_rings;
8503
8504         if (irq_re_init) {
8505                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8506                 if (rc) {
8507                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8508                                    rc);
8509                         goto err_out;
8510                 }
8511         }
8512
8513         rc = bnxt_hwrm_ring_alloc(bp);
8514         if (rc) {
8515                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8516                 goto err_out;
8517         }
8518
8519         rc = bnxt_hwrm_ring_grp_alloc(bp);
8520         if (rc) {
8521                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8522                 goto err_out;
8523         }
8524
8525         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8526                 rx_nr_rings--;
8527
8528         /* default vnic 0 */
8529         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8530         if (rc) {
8531                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8532                 goto err_out;
8533         }
8534
8535         rc = bnxt_setup_vnic(bp, 0);
8536         if (rc)
8537                 goto err_out;
8538
8539         if (bp->flags & BNXT_FLAG_RFS) {
8540                 rc = bnxt_alloc_rfs_vnics(bp);
8541                 if (rc)
8542                         goto err_out;
8543         }
8544
8545         if (bp->flags & BNXT_FLAG_TPA) {
8546                 rc = bnxt_set_tpa(bp, true);
8547                 if (rc)
8548                         goto err_out;
8549         }
8550
8551         if (BNXT_VF(bp))
8552                 bnxt_update_vf_mac(bp);
8553
8554         /* Filter for default vnic 0 */
8555         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8556         if (rc) {
8557                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8558                 goto err_out;
8559         }
8560         vnic->uc_filter_count = 1;
8561
8562         vnic->rx_mask = 0;
8563         if (bp->dev->flags & IFF_BROADCAST)
8564                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8565
8566         if (bp->dev->flags & IFF_PROMISC)
8567                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8568
8569         if (bp->dev->flags & IFF_ALLMULTI) {
8570                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8571                 vnic->mc_list_count = 0;
8572         } else {
8573                 u32 mask = 0;
8574
8575                 bnxt_mc_list_updated(bp, &mask);
8576                 vnic->rx_mask |= mask;
8577         }
8578
8579         rc = bnxt_cfg_rx_mode(bp);
8580         if (rc)
8581                 goto err_out;
8582
8583         rc = bnxt_hwrm_set_coal(bp);
8584         if (rc)
8585                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8586                                 rc);
8587
8588         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8589                 rc = bnxt_setup_nitroa0_vnic(bp);
8590                 if (rc)
8591                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8592                                    rc);
8593         }
8594
8595         if (BNXT_VF(bp)) {
8596                 bnxt_hwrm_func_qcfg(bp);
8597                 netdev_update_features(bp->dev);
8598         }
8599
8600         return 0;
8601
8602 err_out:
8603         bnxt_hwrm_resource_free(bp, 0, true);
8604
8605         return rc;
8606 }
8607
8608 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8609 {
8610         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8611         return 0;
8612 }
8613
8614 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8615 {
8616         bnxt_init_cp_rings(bp);
8617         bnxt_init_rx_rings(bp);
8618         bnxt_init_tx_rings(bp);
8619         bnxt_init_ring_grps(bp, irq_re_init);
8620         bnxt_init_vnics(bp);
8621
8622         return bnxt_init_chip(bp, irq_re_init);
8623 }
8624
8625 static int bnxt_set_real_num_queues(struct bnxt *bp)
8626 {
8627         int rc;
8628         struct net_device *dev = bp->dev;
8629
8630         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8631                                           bp->tx_nr_rings_xdp);
8632         if (rc)
8633                 return rc;
8634
8635         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8636         if (rc)
8637                 return rc;
8638
8639 #ifdef CONFIG_RFS_ACCEL
8640         if (bp->flags & BNXT_FLAG_RFS)
8641                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8642 #endif
8643
8644         return rc;
8645 }
8646
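/* Fit the requested RX and TX ring counts into 'max' completion rings.
 * Shared rings simply cap each count at 'max'; otherwise RX + TX must
 * fit together, shrinking the larger side first.
 */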
8647 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8648                            bool shared)
8649 {
8650         int _rx = *rx, _tx = *tx;
8651
8652         if (shared) {
8653                 *rx = min_t(int, _rx, max);
8654                 *tx = min_t(int, _tx, max);
8655         } else {
8656                 if (max < 2)
8657                         return -ENOMEM;
8658
8659                 while (_rx + _tx > max) {
8660                         if (_rx > _tx && _rx > 1)
8661                                 _rx--;
8662                         else if (_tx > 1)
8663                                 _tx--;
8664                 }
8665                 *rx = _rx;
8666                 *tx = _tx;
8667         }
8668         return 0;
8669 }
8670
8671 static void bnxt_setup_msix(struct bnxt *bp)
8672 {
8673         const int len = sizeof(bp->irq_tbl[0].name);
8674         struct net_device *dev = bp->dev;
8675         int tcs, i;
8676
8677         tcs = netdev_get_num_tc(dev);
8678         if (tcs) {
8679                 int off, count;
8680
8681                 for (i = 0; i < tcs; i++) {
8682                         count = bp->tx_nr_rings_per_tc;
8683                         off = i * count;
8684                         netdev_set_tc_queue(dev, i, count, off);
8685                 }
8686         }
8687
8688         for (i = 0; i < bp->cp_nr_rings; i++) {
8689                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8690                 char *attr;
8691
8692                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8693                         attr = "TxRx";
8694                 else if (i < bp->rx_nr_rings)
8695                         attr = "rx";
8696                 else
8697                         attr = "tx";
8698
8699                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8700                          attr, i);
8701                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8702         }
8703 }
8704
8705 static void bnxt_setup_inta(struct bnxt *bp)
8706 {
8707         const int len = sizeof(bp->irq_tbl[0].name);
8708
8709         if (netdev_get_num_tc(bp->dev))
8710                 netdev_reset_tc(bp->dev);
8711
8712         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8713                  0);
8714         bp->irq_tbl[0].handler = bnxt_inta;
8715 }
8716
8717 static int bnxt_init_int_mode(struct bnxt *bp);
8718
8719 static int bnxt_setup_int_mode(struct bnxt *bp)
8720 {
8721         int rc;
8722
8723         if (!bp->irq_tbl) {
8724                 rc = bnxt_init_int_mode(bp);
8725                 if (rc || !bp->irq_tbl)
8726                         return rc ?: -ENODEV;
8727         }
8728
8729         if (bp->flags & BNXT_FLAG_USING_MSIX)
8730                 bnxt_setup_msix(bp);
8731         else
8732                 bnxt_setup_inta(bp);
8733
8734         rc = bnxt_set_real_num_queues(bp);
8735         return rc;
8736 }
8737
8738 #ifdef CONFIG_RFS_ACCEL
8739 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8740 {
8741         return bp->hw_resc.max_rsscos_ctxs;
8742 }
8743
8744 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8745 {
8746         return bp->hw_resc.max_vnics;
8747 }
8748 #endif
8749
8750 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8751 {
8752         return bp->hw_resc.max_stat_ctxs;
8753 }
8754
8755 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8756 {
8757         return bp->hw_resc.max_cp_rings;
8758 }
8759
8760 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8761 {
8762         unsigned int cp = bp->hw_resc.max_cp_rings;
8763
8764         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8765                 cp -= bnxt_get_ulp_msix_num(bp);
8766
8767         return cp;
8768 }
8769
8770 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8771 {
8772         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8773
8774         if (bp->flags & BNXT_FLAG_CHIP_P5)
8775                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8776
8777         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8778 }
8779
8780 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8781 {
8782         bp->hw_resc.max_irqs = max_irqs;
8783 }
8784
8785 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8786 {
8787         unsigned int cp;
8788
8789         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8790         if (bp->flags & BNXT_FLAG_CHIP_P5)
8791                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8792         else
8793                 return cp - bp->cp_nr_rings;
8794 }
8795
8796 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8797 {
8798         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8799 }
8800
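/* Return how many MSI-X vectors beyond those used by the L2 completion
 * rings can still be handed out, subject to the function's IRQ limit
 * when firmware manages the resources (BNXT_NEW_RM).
 */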
8801 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8802 {
8803         int max_cp = bnxt_get_max_func_cp_rings(bp);
8804         int max_irq = bnxt_get_max_func_irqs(bp);
8805         int total_req = bp->cp_nr_rings + num;
8806         int max_idx, avail_msix;
8807
8808         max_idx = bp->total_irqs;
8809         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8810                 max_idx = min_t(int, bp->total_irqs, max_cp);
8811         avail_msix = max_idx - bp->cp_nr_rings;
8812         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8813                 return avail_msix;
8814
8815         if (max_irq < total_req) {
8816                 num = max_irq - bp->cp_nr_rings;
8817                 if (num <= 0)
8818                         return 0;
8819         }
8820         return num;
8821 }
8822
8823 static int bnxt_get_num_msix(struct bnxt *bp)
8824 {
8825         if (!BNXT_NEW_RM(bp))
8826                 return bnxt_get_max_func_irqs(bp);
8827
8828         return bnxt_nq_rings_in_use(bp);
8829 }
8830
8831 static int bnxt_init_msix(struct bnxt *bp)
8832 {
8833         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8834         struct msix_entry *msix_ent;
8835
8836         total_vecs = bnxt_get_num_msix(bp);
8837         max = bnxt_get_max_func_irqs(bp);
8838         if (total_vecs > max)
8839                 total_vecs = max;
8840
8841         if (!total_vecs)
8842                 return 0;
8843
8844         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8845         if (!msix_ent)
8846                 return -ENOMEM;
8847
8848         for (i = 0; i < total_vecs; i++) {
8849                 msix_ent[i].entry = i;
8850                 msix_ent[i].vector = 0;
8851         }
8852
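        /* without shared rings, RX and TX each need at least one vector */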
8853         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8854                 min = 2;
8855
8856         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8857         ulp_msix = bnxt_get_ulp_msix_num(bp);
8858         if (total_vecs < 0 || total_vecs < ulp_msix) {
8859                 rc = -ENODEV;
8860                 goto msix_setup_exit;
8861         }
8862
8863         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8864         if (bp->irq_tbl) {
8865                 for (i = 0; i < total_vecs; i++)
8866                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8867
8868                 bp->total_irqs = total_vecs;
8869                 /* Trim rings based on the number of vectors allocated */
8870                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8871                                      total_vecs - ulp_msix, min == 1);
8872                 if (rc)
8873                         goto msix_setup_exit;
8874
8875                 bp->cp_nr_rings = (min == 1) ?
8876                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8877                                   bp->tx_nr_rings + bp->rx_nr_rings;
8878
8879         } else {
8880                 rc = -ENOMEM;
8881                 goto msix_setup_exit;
8882         }
8883         bp->flags |= BNXT_FLAG_USING_MSIX;
8884         kfree(msix_ent);
8885         return 0;
8886
8887 msix_setup_exit:
8888         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8889         kfree(bp->irq_tbl);
8890         bp->irq_tbl = NULL;
8891         pci_disable_msix(bp->pdev);
8892         kfree(msix_ent);
8893         return rc;
8894 }
8895
8896 static int bnxt_init_inta(struct bnxt *bp)
8897 {
8898         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8899         if (!bp->irq_tbl)
8900                 return -ENOMEM;
8901
8902         bp->total_irqs = 1;
8903         bp->rx_nr_rings = 1;
8904         bp->tx_nr_rings = 1;
8905         bp->cp_nr_rings = 1;
8906         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8907         bp->irq_tbl[0].vector = bp->pdev->irq;
8908         return 0;
8909 }
8910
8911 static int bnxt_init_int_mode(struct bnxt *bp)
8912 {
8913         int rc = -ENODEV;
8914
8915         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8916                 rc = bnxt_init_msix(bp);
8917
8918         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8919                 /* fallback to INTA */
8920                 rc = bnxt_init_inta(bp);
8921         }
8922         return rc;
8923 }
8924
8925 static void bnxt_clear_int_mode(struct bnxt *bp)
8926 {
8927         if (bp->flags & BNXT_FLAG_USING_MSIX)
8928                 pci_disable_msix(bp->pdev);
8929
8930         kfree(bp->irq_tbl);
8931         bp->irq_tbl = NULL;
8932         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8933 }
8934
8935 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8936 {
8937         int tcs = netdev_get_num_tc(bp->dev);
8938         bool irq_cleared = false;
8939         int rc;
8940
8941         if (!bnxt_need_reserve_rings(bp))
8942                 return 0;
8943
8944         if (irq_re_init && BNXT_NEW_RM(bp) &&
8945             bnxt_get_num_msix(bp) != bp->total_irqs) {
8946                 bnxt_ulp_irq_stop(bp);
8947                 bnxt_clear_int_mode(bp);
8948                 irq_cleared = true;
8949         }
8950         rc = __bnxt_reserve_rings(bp);
8951         if (irq_cleared) {
8952                 if (!rc)
8953                         rc = bnxt_init_int_mode(bp);
8954                 bnxt_ulp_irq_restart(bp, rc);
8955         }
8956         if (rc) {
8957                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8958                 return rc;
8959         }
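        /* the reserved TX rings must still divide evenly among the TCs */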
8960         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8961                 netdev_err(bp->dev, "tx ring reservation failure\n");
8962                 netdev_reset_tc(bp->dev);
8963                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8964                 return -ENOMEM;
8965         }
8966         return 0;
8967 }
8968
8969 static void bnxt_free_irq(struct bnxt *bp)
8970 {
8971         struct bnxt_irq *irq;
8972         int i;
8973
8974 #ifdef CONFIG_RFS_ACCEL
8975         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8976         bp->dev->rx_cpu_rmap = NULL;
8977 #endif
8978         if (!bp->irq_tbl || !bp->bnapi)
8979                 return;
8980
8981         for (i = 0; i < bp->cp_nr_rings; i++) {
8982                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8983
8984                 irq = &bp->irq_tbl[map_idx];
8985                 if (irq->requested) {
8986                         if (irq->have_cpumask) {
8987                                 irq_set_affinity_hint(irq->vector, NULL);
8988                                 free_cpumask_var(irq->cpu_mask);
8989                                 irq->have_cpumask = 0;
8990                         }
8991                         free_irq(irq->vector, bp->bnapi[i]);
8992                 }
8993
8994                 irq->requested = 0;
8995         }
8996 }
8997
8998 static int bnxt_request_irq(struct bnxt *bp)
8999 {
9000         int i, j, rc = 0;
9001         unsigned long flags = 0;
9002 #ifdef CONFIG_RFS_ACCEL
9003         struct cpu_rmap *rmap;
9004 #endif
9005
9006         rc = bnxt_setup_int_mode(bp);
9007         if (rc) {
9008                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9009                            rc);
9010                 return rc;
9011         }
9012 #ifdef CONFIG_RFS_ACCEL
9013         rmap = bp->dev->rx_cpu_rmap;
9014 #endif
9015         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9016                 flags = IRQF_SHARED;
9017
9018         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9019                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9020                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9021
9022 #ifdef CONFIG_RFS_ACCEL
9023                 if (rmap && bp->bnapi[i]->rx_ring) {
9024                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9025                         if (rc)
9026                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9027                                             j);
9028                         j++;
9029                 }
9030 #endif
9031                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9032                                  bp->bnapi[i]);
9033                 if (rc)
9034                         break;
9035
9036                 irq->requested = 1;
9037
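                /* spread vectors across CPUs local to the device's NUMA node */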
9038                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9039                         int numa_node = dev_to_node(&bp->pdev->dev);
9040
9041                         irq->have_cpumask = 1;
9042                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9043                                         irq->cpu_mask);
9044                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9045                         if (rc) {
9046                                 netdev_warn(bp->dev,
9047                                             "Set affinity failed, IRQ = %d\n",
9048                                             irq->vector);
9049                                 break;
9050                         }
9051                 }
9052         }
9053         return rc;
9054 }
9055
9056 static void bnxt_del_napi(struct bnxt *bp)
9057 {
9058         int i;
9059
9060         if (!bp->bnapi)
9061                 return;
9062
9063         for (i = 0; i < bp->cp_nr_rings; i++) {
9064                 struct bnxt_napi *bnapi = bp->bnapi[i];
9065
9066                 __netif_napi_del(&bnapi->napi);
9067         }
9068         /* We called __netif_napi_del(), so we must respect an RCU
9069          * grace period before freeing the napi structures.
9070          */
9071         synchronize_net();
9072 }
9073
9074 static void bnxt_init_napi(struct bnxt *bp)
9075 {
9076         int i;
9077         unsigned int cp_nr_rings = bp->cp_nr_rings;
9078         struct bnxt_napi *bnapi;
9079
9080         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9081                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9082
9083                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9084                         poll_fn = bnxt_poll_p5;
9085                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9086                         cp_nr_rings--;
9087                 for (i = 0; i < cp_nr_rings; i++) {
9088                         bnapi = bp->bnapi[i];
9089                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9090                 }
9091                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9092                         bnapi = bp->bnapi[cp_nr_rings];
9093                         netif_napi_add(bp->dev, &bnapi->napi,
9094                                        bnxt_poll_nitroa0, 64);
9095                 }
9096         } else {
9097                 bnapi = bp->bnapi[0];
9098                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9099         }
9100 }
9101
9102 static void bnxt_disable_napi(struct bnxt *bp)
9103 {
9104         int i;
9105
9106         if (!bp->bnapi ||
9107             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9108                 return;
9109
9110         for (i = 0; i < bp->cp_nr_rings; i++) {
9111                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9112
9113                 if (bp->bnapi[i]->rx_ring)
9114                         cancel_work_sync(&cpr->dim.work);
9115
9116                 napi_disable(&bp->bnapi[i]->napi);
9117         }
9118 }
9119
9120 static void bnxt_enable_napi(struct bnxt *bp)
9121 {
9122         int i;
9123
9124         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9125         for (i = 0; i < bp->cp_nr_rings; i++) {
9126                 struct bnxt_napi *bnapi = bp->bnapi[i];
9127                 struct bnxt_cp_ring_info *cpr;
9128
9129                 cpr = &bnapi->cp_ring;
9130                 if (bnapi->in_reset)
9131                         cpr->sw_stats.rx.rx_resets++;
9132                 bnapi->in_reset = false;
9133
9134                 if (bnapi->rx_ring) {
9135                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9136                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9137                 }
9138                 napi_enable(&bnapi->napi);
9139         }
9140 }
9141
9142 void bnxt_tx_disable(struct bnxt *bp)
9143 {
9144         int i;
9145         struct bnxt_tx_ring_info *txr;
9146
9147         if (bp->tx_ring) {
9148                 for (i = 0; i < bp->tx_nr_rings; i++) {
9149                         txr = &bp->tx_ring[i];
9150                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
9151                 }
9152         }
9153         /* Drop carrier first to prevent TX timeout */
9154         netif_carrier_off(bp->dev);
9155         /* Stop all TX queues */
9156         netif_tx_disable(bp->dev);
9157 }
9158
9159 void bnxt_tx_enable(struct bnxt *bp)
9160 {
9161         int i;
9162         struct bnxt_tx_ring_info *txr;
9163
9164         for (i = 0; i < bp->tx_nr_rings; i++) {
9165                 txr = &bp->tx_ring[i];
9166                 txr->dev_state = 0;
9167         }
9168         netif_tx_wake_all_queues(bp->dev);
9169         if (bp->link_info.link_up)
9170                 netif_carrier_on(bp->dev);
9171 }
9172
9173 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9174 {
9175         u8 active_fec = link_info->active_fec_sig_mode &
9176                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9177
9178         switch (active_fec) {
9179         default:
9180         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9181                 return "None";
9182         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9183                 return "Clause 74 BaseR";
9184         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9185                 return "Clause 91 RS(528,514)";
9186         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9187                 return "Clause 91 RS544_1XN";
9188         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9189                 return "Clause 91 RS(544,514)";
9190         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9191                 return "Clause 91 RS272_1XN";
9192         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9193                 return "Clause 91 RS(272,257)";
9194         }
9195 }
9196
9197 static void bnxt_report_link(struct bnxt *bp)
9198 {
9199         if (bp->link_info.link_up) {
9200                 const char *signal = "";
9201                 const char *flow_ctrl;
9202                 const char *duplex;
9203                 u32 speed;
9204                 u16 fec;
9205
9206                 netif_carrier_on(bp->dev);
9207                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9208                 if (speed == SPEED_UNKNOWN) {
9209                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9210                         return;
9211                 }
9212                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9213                         duplex = "full";
9214                 else
9215                         duplex = "half";
9216                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9217                         flow_ctrl = "ON - receive & transmit";
9218                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9219                         flow_ctrl = "ON - transmit";
9220                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9221                         flow_ctrl = "ON - receive";
9222                 else
9223                         flow_ctrl = "none";
9224                 if (bp->link_info.phy_qcfg_resp.option_flags &
9225                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9226                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9227                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9228                         switch (sig_mode) {
9229                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9230                                 signal = "(NRZ) ";
9231                                 break;
9232                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9233                                 signal = "(PAM4) ";
9234                                 break;
9235                         default:
9236                                 break;
9237                         }
9238                 }
9239                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9240                             speed, signal, duplex, flow_ctrl);
9241                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9242                         netdev_info(bp->dev, "EEE is %s\n",
9243                                     bp->eee.eee_active ? "active" :
9244                                                          "not active");
9245                 fec = bp->link_info.fec_cfg;
9246                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9247                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9248                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9249                                     bnxt_report_fec(&bp->link_info));
9250         } else {
9251                 netif_carrier_off(bp->dev);
9252                 netdev_err(bp->dev, "NIC Link is Down\n");
9253         }
9254 }
9255
9256 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9257 {
9258         if (!resp->supported_speeds_auto_mode &&
9259             !resp->supported_speeds_force_mode &&
9260             !resp->supported_pam4_speeds_auto_mode &&
9261             !resp->supported_pam4_speeds_force_mode)
9262                 return true;
9263         return false;
9264 }
9265
9266 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9267 {
9268         int rc = 0;
9269         struct hwrm_port_phy_qcaps_input req = {0};
9270         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9271         struct bnxt_link_info *link_info = &bp->link_info;
9272
9273         if (bp->hwrm_spec_code < 0x10201)
9274                 return 0;
9275
9276         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9277
9278         mutex_lock(&bp->hwrm_cmd_lock);
9279         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9280         if (rc)
9281                 goto hwrm_phy_qcaps_exit;
9282
9283         bp->phy_flags = resp->flags;
9284         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9285                 struct ethtool_eee *eee = &bp->eee;
9286                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9287
9288                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9289                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9290                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9291                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9292                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9293         }
9294
9295         if (bp->hwrm_spec_code >= 0x10a01) {
9296                 if (bnxt_phy_qcaps_no_speed(resp)) {
9297                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9298                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9299                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9300                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9301                         netdev_info(bp->dev, "Ethernet link enabled\n");
9302                         /* Phy re-enabled, reprobe the speeds */
9303                         link_info->support_auto_speeds = 0;
9304                         link_info->support_pam4_auto_speeds = 0;
9305                 }
9306         }
9307         if (resp->supported_speeds_auto_mode)
9308                 link_info->support_auto_speeds =
9309                         le16_to_cpu(resp->supported_speeds_auto_mode);
9310         if (resp->supported_pam4_speeds_auto_mode)
9311                 link_info->support_pam4_auto_speeds =
9312                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9313
9314         bp->port_count = resp->port_cnt;
9315
9316 hwrm_phy_qcaps_exit:
9317         mutex_unlock(&bp->hwrm_cmd_lock);
9318         return rc;
9319 }
9320
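/* Return true if 'advertising' contains speeds that 'supported' has dropped */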
9321 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9322 {
9323         u16 diff = advertising ^ supported;
9324
9325         return ((supported | diff) != supported);
9326 }
9327
9328 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9329 {
9330         int rc = 0;
9331         struct bnxt_link_info *link_info = &bp->link_info;
9332         struct hwrm_port_phy_qcfg_input req = {0};
9333         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9334         u8 link_up = link_info->link_up;
9335         bool support_changed = false;
9336
9337         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9338
9339         mutex_lock(&bp->hwrm_cmd_lock);
9340         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9341         if (rc) {
9342                 mutex_unlock(&bp->hwrm_cmd_lock);
9343                 return rc;
9344         }
9345
9346         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9347         link_info->phy_link_status = resp->link;
9348         link_info->duplex = resp->duplex_cfg;
9349         if (bp->hwrm_spec_code >= 0x10800)
9350                 link_info->duplex = resp->duplex_state;
9351         link_info->pause = resp->pause;
9352         link_info->auto_mode = resp->auto_mode;
9353         link_info->auto_pause_setting = resp->auto_pause;
9354         link_info->lp_pause = resp->link_partner_adv_pause;
9355         link_info->force_pause_setting = resp->force_pause;
9356         link_info->duplex_setting = resp->duplex_cfg;
9357         if (link_info->phy_link_status == BNXT_LINK_LINK)
9358                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9359         else
9360                 link_info->link_speed = 0;
9361         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9362         link_info->force_pam4_link_speed =
9363                 le16_to_cpu(resp->force_pam4_link_speed);
9364         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9365         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9366         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9367         link_info->auto_pam4_link_speeds =
9368                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9369         link_info->lp_auto_link_speeds =
9370                 le16_to_cpu(resp->link_partner_adv_speeds);
9371         link_info->lp_auto_pam4_link_speeds =
9372                 resp->link_partner_pam4_adv_speeds;
9373         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9374         link_info->phy_ver[0] = resp->phy_maj;
9375         link_info->phy_ver[1] = resp->phy_min;
9376         link_info->phy_ver[2] = resp->phy_bld;
9377         link_info->media_type = resp->media_type;
9378         link_info->phy_type = resp->phy_type;
9379         link_info->transceiver = resp->xcvr_pkg_type;
9380         link_info->phy_addr = resp->eee_config_phy_addr &
9381                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9382         link_info->module_status = resp->module_status;
9383
9384         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9385                 struct ethtool_eee *eee = &bp->eee;
9386                 u16 fw_speeds;
9387
9388                 eee->eee_active = 0;
9389                 if (resp->eee_config_phy_addr &
9390                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9391                         eee->eee_active = 1;
9392                         fw_speeds = le16_to_cpu(
9393                                 resp->link_partner_adv_eee_link_speed_mask);
9394                         eee->lp_advertised =
9395                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9396                 }
9397
9398                 /* Pull initial EEE config */
9399                 if (!chng_link_state) {
9400                         if (resp->eee_config_phy_addr &
9401                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9402                                 eee->eee_enabled = 1;
9403
9404                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9405                         eee->advertised =
9406                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9407
9408                         if (resp->eee_config_phy_addr &
9409                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9410                                 __le32 tmr;
9411
9412                                 eee->tx_lpi_enabled = 1;
9413                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9414                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9415                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9416                         }
9417                 }
9418         }
9419
9420         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9421         if (bp->hwrm_spec_code >= 0x10504) {
9422                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9423                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9424         }
9425         /* TODO: need to add more logic to report VF link */
9426         if (chng_link_state) {
9427                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9428                         link_info->link_up = 1;
9429                 else
9430                         link_info->link_up = 0;
9431                 if (link_up != link_info->link_up)
9432                         bnxt_report_link(bp);
9433         } else {
9434                 /* always link down if not required to update link state */
9435                 link_info->link_up = 0;
9436         }
9437         mutex_unlock(&bp->hwrm_cmd_lock);
9438
9439         if (!BNXT_PHY_CFG_ABLE(bp))
9440                 return 0;
9441
9442         /* Check if any advertised speeds are no longer supported. The caller
9443          * holds the link_lock mutex, so we can modify link_info settings.
9444          */
9445         if (bnxt_support_dropped(link_info->advertising,
9446                                  link_info->support_auto_speeds)) {
9447                 link_info->advertising = link_info->support_auto_speeds;
9448                 support_changed = true;
9449         }
9450         if (bnxt_support_dropped(link_info->advertising_pam4,
9451                                  link_info->support_pam4_auto_speeds)) {
9452                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9453                 support_changed = true;
9454         }
9455         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9456                 bnxt_hwrm_set_link_setting(bp, true, false);
9457         return 0;
9458 }
9459
9460 static void bnxt_get_port_module_status(struct bnxt *bp)
9461 {
9462         struct bnxt_link_info *link_info = &bp->link_info;
9463         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9464         u8 module_status;
9465
9466         if (bnxt_update_link(bp, true))
9467                 return;
9468
9469         module_status = link_info->module_status;
9470         switch (module_status) {
9471         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9472         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9473         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9474                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9475                             bp->pf.port_id);
9476                 if (bp->hwrm_spec_code >= 0x10201) {
9477                         netdev_warn(bp->dev, "Module part number %s\n",
9478                                     resp->phy_vendor_partnumber);
9479                 }
9480                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9481                         netdev_warn(bp->dev, "TX is disabled\n");
9482                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9483                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9484         }
9485 }
9486
9487 static void
9488 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9489 {
9490         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9491                 if (bp->hwrm_spec_code >= 0x10201)
9492                         req->auto_pause =
9493                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9494                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9495                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9496                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9497                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9498                 req->enables |=
9499                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9500         } else {
9501                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9502                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9503                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9504                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9505                 req->enables |=
9506                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9507                 if (bp->hwrm_spec_code >= 0x10201) {
9508                         req->auto_pause = req->force_pause;
9509                         req->enables |= cpu_to_le32(
9510                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9511                 }
9512         }
9513 }
9514
9515 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9516 {
9517         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9518                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9519                 if (bp->link_info.advertising) {
9520                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9521                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9522                 }
9523                 if (bp->link_info.advertising_pam4) {
9524                         req->enables |=
9525                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9526                         req->auto_link_pam4_speed_mask =
9527                                 cpu_to_le16(bp->link_info.advertising_pam4);
9528                 }
9529                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9530                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9531         } else {
9532                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9533                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9534                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9535                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9536                 } else {
9537                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9538                 }
9539         }
9540
9541         /* tell ChiMP that the setting takes effect immediately */
9542         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9543 }
9544
9545 int bnxt_hwrm_set_pause(struct bnxt *bp)
9546 {
9547         struct hwrm_port_phy_cfg_input req = {0};
9548         int rc;
9549
9550         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9551         bnxt_hwrm_set_pause_common(bp, &req);
9552
9553         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9554             bp->link_info.force_link_chng)
9555                 bnxt_hwrm_set_link_common(bp, &req);
9556
9557         mutex_lock(&bp->hwrm_cmd_lock);
9558         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9559         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9560                 /* since changing the pause setting doesn't trigger any link
9561                  * change event, the driver needs to update the current pause
9562                  * result upon successful return of the phy_cfg command
9563                  */
9564                 bp->link_info.pause =
9565                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9566                 bp->link_info.auto_pause_setting = 0;
9567                 if (!bp->link_info.force_link_chng)
9568                         bnxt_report_link(bp);
9569         }
9570         bp->link_info.force_link_chng = false;
9571         mutex_unlock(&bp->hwrm_cmd_lock);
9572         return rc;
9573 }
9574
9575 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9576                               struct hwrm_port_phy_cfg_input *req)
9577 {
9578         struct ethtool_eee *eee = &bp->eee;
9579
9580         if (eee->eee_enabled) {
9581                 u16 eee_speeds;
9582                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9583
9584                 if (eee->tx_lpi_enabled)
9585                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9586                 else
9587                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9588
9589                 req->flags |= cpu_to_le32(flags);
9590                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9591                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9592                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9593         } else {
9594                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9595         }
9596 }
9597
9598 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9599 {
9600         struct hwrm_port_phy_cfg_input req = {0};
9601
9602         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9603         if (set_pause)
9604                 bnxt_hwrm_set_pause_common(bp, &req);
9605
9606         bnxt_hwrm_set_link_common(bp, &req);
9607
9608         if (set_eee)
9609                 bnxt_hwrm_set_eee(bp, &req);
9610         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9611 }
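/* Usage sketch (illustrative): callers serialize on bp->link_lock, update
 * bp->link_info, and then push the new configuration to firmware.  A
 * hypothetical autoneg-enable path could look like:
 *
 *	mutex_lock(&bp->link_lock);
 *	bp->link_info.autoneg |= BNXT_AUTONEG_SPEED;
 *	bp->link_info.advertising = bp->link_info.support_auto_speeds;
 *	rc = bnxt_hwrm_set_link_setting(bp, true, false);
 *	mutex_unlock(&bp->link_lock);
 */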
9612
9613 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9614 {
9615         struct hwrm_port_phy_cfg_input req = {0};
9616
9617         if (!BNXT_SINGLE_PF(bp))
9618                 return 0;
9619
9620         if (pci_num_vf(bp->pdev) &&
9621             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9622                 return 0;
9623
9624         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9625         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9626         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9627 }
9628
9629 static int bnxt_fw_init_one(struct bnxt *bp);
9630
9631 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9632 {
9633 #ifdef CONFIG_TEE_BNXT_FW
9634         int rc = tee_bnxt_fw_load();
9635
9636         if (rc)
9637                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9638
9639         return rc;
9640 #else
9641         netdev_err(bp->dev, "OP-TEE not supported\n");
9642         return -ENODEV;
9643 #endif
9644 }
9645
9646 static int bnxt_try_recover_fw(struct bnxt *bp)
9647 {
9648         if (bp->fw_health && bp->fw_health->status_reliable) {
9649                 int retry = 0, rc;
9650                 u32 sts;
9651
9652                 mutex_lock(&bp->hwrm_cmd_lock);
9653                 do {
9654                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9655                         rc = __bnxt_hwrm_ver_get(bp, true);
9656                         if (!BNXT_FW_IS_BOOTING(sts) &&
9657                             !BNXT_FW_IS_RECOVERING(sts))
9658                                 break;
9659                         retry++;
9660                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9661                 mutex_unlock(&bp->hwrm_cmd_lock);
9662
9663                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9664                         netdev_err(bp->dev,
9665                                    "Firmware not responding, status: 0x%x\n",
9666                                    sts);
9667                         rc = -ENODEV;
9668                 }
9669                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9670                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9671                         return bnxt_fw_reset_via_optee(bp);
9672                 }
9673                 return rc;
9674         }
9675
9676         return -ENODEV;
9677 }
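/* Recovery decision sketch (illustrative), summarizing the loop above:
 * the HWRM_VER_GET probe is retried while firmware reports it is still
 * coming up, then the final health status decides the outcome:
 *
 *	BOOTING/RECOVERING   -> keep polling, up to BNXT_FW_RETRY times
 *	HEALTHY              -> return the VER_GET result
 *	not HEALTHY          -> log and return -ENODEV
 *	CRASHED_NO_MASTER    -> attempt a firmware reload via OP-TEE
 */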
9678
9679 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9680 {
9681         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9682         struct hwrm_func_drv_if_change_input req = {0};
9683         bool fw_reset = !bp->irq_tbl;
9684         bool resc_reinit = false;
9685         int rc, retry = 0;
9686         u32 flags = 0;
9687
9688         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9689                 return 0;
9690
9691         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9692         if (up)
9693                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9694         mutex_lock(&bp->hwrm_cmd_lock);
9695         while (retry < BNXT_FW_IF_RETRY) {
9696                 rc = _hwrm_send_message(bp, &req, sizeof(req),
9697                                         HWRM_CMD_TIMEOUT);
9698                 if (rc != -EAGAIN)
9699                         break;
9700
9701                 msleep(50);
9702                 retry++;
9703         }
9704         if (!rc)
9705                 flags = le32_to_cpu(resp->flags);
9706         mutex_unlock(&bp->hwrm_cmd_lock);
9707
9708         if (rc == -EAGAIN)
9709                 return rc;
9710         if (rc && up) {
9711                 rc = bnxt_try_recover_fw(bp);
9712                 fw_reset = true;
9713         }
9714         if (rc)
9715                 return rc;
9716
9717         if (!up) {
9718                 bnxt_inv_fw_health_reg(bp);
9719                 return 0;
9720         }
9721
9722         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9723                 resc_reinit = true;
9724         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9725                 fw_reset = true;
9726         else if (bp->fw_health && !bp->fw_health->status_reliable)
9727                 bnxt_try_map_fw_health_reg(bp);
9728
9729         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9730                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9731                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9732                 return -ENODEV;
9733         }
9734         if (resc_reinit || fw_reset) {
9735                 if (fw_reset) {
9736                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9737                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9738                                 bnxt_ulp_stop(bp);
9739                         bnxt_free_ctx_mem(bp);
9740                         kfree(bp->ctx);
9741                         bp->ctx = NULL;
9742                         bnxt_dcb_free(bp);
9743                         rc = bnxt_fw_init_one(bp);
9744                         if (rc) {
9745                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9746                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9747                                 return rc;
9748                         }
9749                         bnxt_clear_int_mode(bp);
9750                         rc = bnxt_init_int_mode(bp);
9751                         if (rc) {
9752                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9753                                 netdev_err(bp->dev, "init int mode failed\n");
9754                                 return rc;
9755                         }
9756                 }
9757                 if (BNXT_NEW_RM(bp)) {
9758                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9759
9760                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9761                         if (rc)
9762                                 netdev_err(bp->dev, "resc_qcaps failed\n");
9763
9764                         hw_resc->resv_cp_rings = 0;
9765                         hw_resc->resv_stat_ctxs = 0;
9766                         hw_resc->resv_irqs = 0;
9767                         hw_resc->resv_tx_rings = 0;
9768                         hw_resc->resv_rx_rings = 0;
9769                         hw_resc->resv_hw_ring_grps = 0;
9770                         hw_resc->resv_vnics = 0;
9771                         if (!fw_reset) {
9772                                 bp->tx_nr_rings = 0;
9773                                 bp->rx_nr_rings = 0;
9774                         }
9775                 }
9776         }
9777         return rc;
9778 }
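/* Flag-handling sketch (illustrative): on the "up" transition, the
 * response flags from FUNC_DRV_IF_CHANGE map to recovery work roughly as:
 *
 *	RESC_CHANGE        -> re-query resource caps, clear reservations
 *	HOT_FW_RESET_DONE  -> full re-init: free context memory, run
 *	                      bnxt_fw_init_one(), re-init interrupt mode
 *	neither            -> nothing to redo, proceed with normal open
 */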
9779
9780 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9781 {
9782         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9783         struct hwrm_port_led_qcaps_input req = {0};
9784         struct bnxt_pf_info *pf = &bp->pf;
9785         int rc;
9786
9787         bp->num_leds = 0;
9788         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9789                 return 0;
9790
9791         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9792         req.port_id = cpu_to_le16(pf->port_id);
9793         mutex_lock(&bp->hwrm_cmd_lock);
9794         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9795         if (rc) {
9796                 mutex_unlock(&bp->hwrm_cmd_lock);
9797                 return rc;
9798         }
9799         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9800                 int i;
9801
9802                 bp->num_leds = resp->num_leds;
9803                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9804                                                  bp->num_leds);
9805                 for (i = 0; i < bp->num_leds; i++) {
9806                         struct bnxt_led_info *led = &bp->leds[i];
9807                         __le16 caps = led->led_state_caps;
9808
9809                         if (!led->led_group_id ||
9810                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
9811                                 bp->num_leds = 0;
9812                                 break;
9813                         }
9814                 }
9815         }
9816         mutex_unlock(&bp->hwrm_cmd_lock);
9817         return 0;
9818 }
9819
9820 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9821 {
9822         struct hwrm_wol_filter_alloc_input req = {0};
9823         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9824         int rc;
9825
9826         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9827         req.port_id = cpu_to_le16(bp->pf.port_id);
9828         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9829         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9830         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9831         mutex_lock(&bp->hwrm_cmd_lock);
9832         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9833         if (!rc)
9834                 bp->wol_filter_id = resp->wol_filter_id;
9835         mutex_unlock(&bp->hwrm_cmd_lock);
9836         return rc;
9837 }
9838
9839 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9840 {
9841         struct hwrm_wol_filter_free_input req = {0};
9842
9843         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9844         req.port_id = cpu_to_le16(bp->pf.port_id);
9845         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9846         req.wol_filter_id = bp->wol_filter_id;
9847         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9848 }
9849
9850 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9851 {
9852         struct hwrm_wol_filter_qcfg_input req = {0};
9853         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9854         u16 next_handle = 0;
9855         int rc;
9856
9857         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9858         req.port_id = cpu_to_le16(bp->pf.port_id);
9859         req.handle = cpu_to_le16(handle);
9860         mutex_lock(&bp->hwrm_cmd_lock);
9861         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9862         if (!rc) {
9863                 next_handle = le16_to_cpu(resp->next_handle);
9864                 if (next_handle != 0) {
9865                         if (resp->wol_type ==
9866                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9867                                 bp->wol = 1;
9868                                 bp->wol_filter_id = resp->wol_filter_id;
9869                         }
9870                 }
9871         }
9872         mutex_unlock(&bp->hwrm_cmd_lock);
9873         return next_handle;
9874 }
9875
9876 static void bnxt_get_wol_settings(struct bnxt *bp)
9877 {
9878         u16 handle = 0;
9879
9880         bp->wol = 0;
9881         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9882                 return;
9883
9884         do {
9885                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9886         } while (handle && handle != 0xffff);
9887 }
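/* Enumeration sketch (illustrative): WoL filters form a chain of
 * firmware handles.  Starting from handle 0, each WOL_FILTER_QCFG call
 * returns the next handle; 0 means no more filters and 0xffff terminates
 * the list, so a walk over two configured filters might look like:
 *
 *	0 -> 5 -> 0xffff (stop)
 *
 * Any magic-packet filter seen along the way latches bp->wol and
 * bp->wol_filter_id.
 */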
9888
9889 #ifdef CONFIG_BNXT_HWMON
9890 static ssize_t bnxt_show_temp(struct device *dev,
9891                               struct device_attribute *devattr, char *buf)
9892 {
9893         struct hwrm_temp_monitor_query_input req = {0};
9894         struct hwrm_temp_monitor_query_output *resp;
9895         struct bnxt *bp = dev_get_drvdata(dev);
9896         u32 len = 0;
9897         int rc;
9898
9899         resp = bp->hwrm_cmd_resp_addr;
9900         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9901         mutex_lock(&bp->hwrm_cmd_lock);
9902         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9903         if (!rc)
9904                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9905         mutex_unlock(&bp->hwrm_cmd_lock);
9906         if (rc)
9907                 return rc;
9908         return len;
9909 }
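/* Unit note (illustrative): the hwmon sysfs ABI reports temp1_input in
 * millidegrees Celsius, which is why the raw firmware reading (whole
 * degrees C) is scaled by 1000 above; a 45 C sensor reads back "45000".
 */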
9910 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9911
9912 static struct attribute *bnxt_attrs[] = {
9913         &sensor_dev_attr_temp1_input.dev_attr.attr,
9914         NULL
9915 };
9916 ATTRIBUTE_GROUPS(bnxt);
9917
9918 static void bnxt_hwmon_close(struct bnxt *bp)
9919 {
9920         if (bp->hwmon_dev) {
9921                 hwmon_device_unregister(bp->hwmon_dev);
9922                 bp->hwmon_dev = NULL;
9923         }
9924 }
9925
9926 static void bnxt_hwmon_open(struct bnxt *bp)
9927 {
9928         struct hwrm_temp_monitor_query_input req = {0};
9929         struct pci_dev *pdev = bp->pdev;
9930         int rc;
9931
9932         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9933         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9934         if (rc == -EACCES || rc == -EOPNOTSUPP) {
9935                 bnxt_hwmon_close(bp);
9936                 return;
9937         }
9938
9939         if (bp->hwmon_dev)
9940                 return;
9941
9942         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9943                                                           DRV_MODULE_NAME, bp,
9944                                                           bnxt_groups);
9945         if (IS_ERR(bp->hwmon_dev)) {
9946                 bp->hwmon_dev = NULL;
9947                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9948         }
9949 }
9950 #else
9951 static void bnxt_hwmon_close(struct bnxt *bp)
9952 {
9953 }
9954
9955 static void bnxt_hwmon_open(struct bnxt *bp)
9956 {
9957 }
9958 #endif
9959
9960 static bool bnxt_eee_config_ok(struct bnxt *bp)
9961 {
9962         struct ethtool_eee *eee = &bp->eee;
9963         struct bnxt_link_info *link_info = &bp->link_info;
9964
9965         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
9966                 return true;
9967
9968         if (eee->eee_enabled) {
9969                 u32 advertising =
9970                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9971
9972                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9973                         eee->eee_enabled = 0;
9974                         return false;
9975                 }
9976                 if (eee->advertised & ~advertising) {
9977                         eee->advertised = advertising & eee->supported;
9978                         return false;
9979                 }
9980         }
9981         return true;
9982 }
9983
9984 static int bnxt_update_phy_setting(struct bnxt *bp)
9985 {
9986         int rc;
9987         bool update_link = false;
9988         bool update_pause = false;
9989         bool update_eee = false;
9990         struct bnxt_link_info *link_info = &bp->link_info;
9991
9992         rc = bnxt_update_link(bp, true);
9993         if (rc) {
9994                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9995                            rc);
9996                 return rc;
9997         }
9998         if (!BNXT_SINGLE_PF(bp))
9999                 return 0;
10000
10001         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10002             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10003             link_info->req_flow_ctrl)
10004                 update_pause = true;
10005         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10006             link_info->force_pause_setting != link_info->req_flow_ctrl)
10007                 update_pause = true;
10008         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10009                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10010                         update_link = true;
10011                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10012                     link_info->req_link_speed != link_info->force_link_speed)
10013                         update_link = true;
10014                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10015                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10016                         update_link = true;
10017                 if (link_info->req_duplex != link_info->duplex_setting)
10018                         update_link = true;
10019         } else {
10020                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10021                         update_link = true;
10022                 if (link_info->advertising != link_info->auto_link_speeds ||
10023                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10024                         update_link = true;
10025         }
10026
10027         /* The last close may have shut down the link, so we need to call
10028          * PHY_CFG to bring it back up.
10029          */
10030         if (!bp->link_info.link_up)
10031                 update_link = true;
10032
10033         if (!bnxt_eee_config_ok(bp))
10034                 update_eee = true;
10035
10036         if (update_link)
10037                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10038         else if (update_pause)
10039                 rc = bnxt_hwrm_set_pause(bp);
10040         if (rc) {
10041                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10042                            rc);
10043                 return rc;
10044         }
10045
10046         return rc;
10047 }
10048
10049 /* Common routine to pre-map certain register blocks into different GRC
10050  * windows.  A PF has 16 4K windows and a VF has 4 4K windows, but only 15
10051  * windows in the PF and 3 in the VF can be customized to map in different
10052  * register blocks.
10053  */
10054 static void bnxt_preset_reg_win(struct bnxt *bp)
10055 {
10056         if (BNXT_PF(bp)) {
10057                 /* CAG registers map to GRC window #4 */
10058                 writel(BNXT_CAG_REG_BASE,
10059                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10060         }
10061 }
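/* Window-selection sketch (illustrative, assuming one 4-byte base
 * register per window): window N is programmed at
 *
 *	BNXT_GRCPF_REG_WINDOW_BASE_OUT + (N - 1) * 4
 *
 * so the write at offset +12 above maps the CAG block into window #4.
 * One window stays fixed for the default mapping, which is why only 15
 * of 16 (PF) and 3 of 4 (VF) windows can be remapped.
 */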
10062
10063 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10064
10065 static int bnxt_reinit_after_abort(struct bnxt *bp)
10066 {
10067         int rc;
10068
10069         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10070                 return -EBUSY;
10071
10072         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10073                 return -ENODEV;
10074
10075         rc = bnxt_fw_init_one(bp);
10076         if (!rc) {
10077                 bnxt_clear_int_mode(bp);
10078                 rc = bnxt_init_int_mode(bp);
10079                 if (!rc) {
10080                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10081                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10082                 }
10083         }
10084         return rc;
10085 }
10086
10087 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10088 {
10089         int rc = 0;
10090
10091         bnxt_preset_reg_win(bp);
10092         netif_carrier_off(bp->dev);
10093         if (irq_re_init) {
10094                 /* Reserve rings now if none were reserved at driver probe. */
10095                 rc = bnxt_init_dflt_ring_mode(bp);
10096                 if (rc) {
10097                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10098                         return rc;
10099                 }
10100         }
10101         rc = bnxt_reserve_rings(bp, irq_re_init);
10102         if (rc)
10103                 return rc;
10104         if ((bp->flags & BNXT_FLAG_RFS) &&
10105             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10106                 /* disable RFS if falling back to INTA */
10107                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10108                 bp->flags &= ~BNXT_FLAG_RFS;
10109         }
10110
10111         rc = bnxt_alloc_mem(bp, irq_re_init);
10112         if (rc) {
10113                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10114                 goto open_err_free_mem;
10115         }
10116
10117         if (irq_re_init) {
10118                 bnxt_init_napi(bp);
10119                 rc = bnxt_request_irq(bp);
10120                 if (rc) {
10121                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10122                         goto open_err_irq;
10123                 }
10124         }
10125
10126         bnxt_ptp_start(bp);
10127         rc = bnxt_init_nic(bp, irq_re_init);
10128         if (rc) {
10129                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10130                 goto open_err_irq;
10131         }
10132
10133         bnxt_enable_napi(bp);
10134         bnxt_debug_dev_init(bp);
10135
10136         if (link_re_init) {
10137                 mutex_lock(&bp->link_lock);
10138                 rc = bnxt_update_phy_setting(bp);
10139                 mutex_unlock(&bp->link_lock);
10140                 if (rc) {
10141                         netdev_warn(bp->dev, "failed to update phy settings\n");
10142                         if (BNXT_SINGLE_PF(bp)) {
10143                                 bp->link_info.phy_retry = true;
10144                                 bp->link_info.phy_retry_expires =
10145                                         jiffies + 5 * HZ;
10146                         }
10147                 }
10148         }
10149
10150         if (irq_re_init)
10151                 udp_tunnel_nic_reset_ntf(bp->dev);
10152
10153         set_bit(BNXT_STATE_OPEN, &bp->state);
10154         bnxt_enable_int(bp);
10155         /* Enable TX queues */
10156         bnxt_tx_enable(bp);
10157         mod_timer(&bp->timer, jiffies + bp->current_interval);
10158         /* Poll link status and check SFP+ module status */
10159         bnxt_get_port_module_status(bp);
10160
10161         /* VF-reps may need to be re-opened after the PF is re-opened */
10162         if (BNXT_PF(bp))
10163                 bnxt_vf_reps_open(bp);
10164         return 0;
10165
10166 open_err_irq:
10167         bnxt_del_napi(bp);
10168
10169 open_err_free_mem:
10170         bnxt_free_skbs(bp);
10171         bnxt_free_irq(bp);
10172         bnxt_free_mem(bp, true);
10173         return rc;
10174 }
10175
10176 /* rtnl_lock held */
10177 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10178 {
10179         int rc = 0;
10180
10181         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10182                 rc = -EIO;
10183         if (!rc)
10184                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10185         if (rc) {
10186                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10187                 dev_close(bp->dev);
10188         }
10189         return rc;
10190 }
10191
10192 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10193  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10194  * self-tests.
10195  */
10196 int bnxt_half_open_nic(struct bnxt *bp)
10197 {
10198         int rc = 0;
10199
10200         rc = bnxt_alloc_mem(bp, false);
10201         if (rc) {
10202                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10203                 goto half_open_err;
10204         }
10205         rc = bnxt_init_nic(bp, false);
10206         if (rc) {
10207                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10208                 goto half_open_err;
10209         }
10210         return 0;
10211
10212 half_open_err:
10213         bnxt_free_skbs(bp);
10214         bnxt_free_mem(bp, false);
10215         dev_close(bp->dev);
10216         return rc;
10217 }
10218
10219 /* rtnl_lock held, this call can only be made after a previous successful
10220  * call to bnxt_half_open_nic().
10221  */
10222 void bnxt_half_close_nic(struct bnxt *bp)
10223 {
10224         bnxt_hwrm_resource_free(bp, false, false);
10225         bnxt_free_skbs(bp);
10226         bnxt_free_mem(bp, false);
10227 }
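/* Usage sketch (illustrative): offline self-tests pair the two halves
 * under rtnl_lock, e.g.:
 *
 *	rc = bnxt_half_open_nic(bp);
 *	if (!rc) {
 *		... run loopback tests ...
 *		bnxt_half_close_nic(bp);
 *	}
 */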
10228
10229 static void bnxt_reenable_sriov(struct bnxt *bp)
10230 {
10231         if (BNXT_PF(bp)) {
10232                 struct bnxt_pf_info *pf = &bp->pf;
10233                 int n = pf->active_vfs;
10234
10235                 if (n)
10236                         bnxt_cfg_hw_sriov(bp, &n, true);
10237         }
10238 }
10239
10240 static int bnxt_open(struct net_device *dev)
10241 {
10242         struct bnxt *bp = netdev_priv(dev);
10243         int rc;
10244
10245         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10246                 rc = bnxt_reinit_after_abort(bp);
10247                 if (rc) {
10248                         if (rc == -EBUSY)
10249                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10250                         else
10251                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10252                         return -ENODEV;
10253                 }
10254         }
10255
10256         rc = bnxt_hwrm_if_change(bp, true);
10257         if (rc)
10258                 return rc;
10259         rc = __bnxt_open_nic(bp, true, true);
10260         if (rc) {
10261                 bnxt_hwrm_if_change(bp, false);
10262         } else {
10263                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10264                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10265                                 bnxt_ulp_start(bp, 0);
10266                                 bnxt_reenable_sriov(bp);
10267                         }
10268                 }
10269                 bnxt_hwmon_open(bp);
10270         }
10271
10272         return rc;
10273 }
10274
10275 static bool bnxt_drv_busy(struct bnxt *bp)
10276 {
10277         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10278                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10279 }
10280
10281 static void bnxt_get_ring_stats(struct bnxt *bp,
10282                                 struct rtnl_link_stats64 *stats);
10283
10284 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10285                              bool link_re_init)
10286 {
10287         /* Close the VF-reps before closing PF */
10288         if (BNXT_PF(bp))
10289                 bnxt_vf_reps_close(bp);
10290
10291         /* Change device state to avoid TX queue wake-ups */
10292         bnxt_tx_disable(bp);
10293
10294         clear_bit(BNXT_STATE_OPEN, &bp->state);
10295         smp_mb__after_atomic();
10296         while (bnxt_drv_busy(bp))
10297                 msleep(20);
10298
10299         /* Flush rings and disable interrupts */
10300         bnxt_shutdown_nic(bp, irq_re_init);
10301
10302         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10303
10304         bnxt_debug_dev_exit(bp);
10305         bnxt_disable_napi(bp);
10306         del_timer_sync(&bp->timer);
10307         bnxt_free_skbs(bp);
10308
10309         /* Save ring stats before shutdown */
10310         if (bp->bnapi && irq_re_init)
10311                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10312         if (irq_re_init) {
10313                 bnxt_free_irq(bp);
10314                 bnxt_del_napi(bp);
10315         }
10316         bnxt_free_mem(bp, irq_re_init);
10317 }
10318
10319 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10320 {
10321         int rc = 0;
10322
10323         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10324                 /* If we get here, it means firmware reset is in progress
10325                  * while we are trying to close.  We can safely proceed with
10326                  * the close because we are holding rtnl_lock().  Some firmware
10327                  * messages may fail as we proceed to close.  We set the
10328                  * ABORT_ERR flag here so that the FW reset thread will later
10329                  * abort when it gets the rtnl_lock() and sees the flag.
10330                  */
10331                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10332                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10333         }
10334
10335 #ifdef CONFIG_BNXT_SRIOV
10336         if (bp->sriov_cfg) {
10337                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10338                                                       !bp->sriov_cfg,
10339                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10340                 if (rc)
10341                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10342         }
10343 #endif
10344         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10345         return rc;
10346 }
10347
10348 static int bnxt_close(struct net_device *dev)
10349 {
10350         struct bnxt *bp = netdev_priv(dev);
10351
10352         bnxt_hwmon_close(bp);
10353         bnxt_close_nic(bp, true, true);
10354         bnxt_hwrm_shutdown_link(bp);
10355         bnxt_hwrm_if_change(bp, false);
10356         return 0;
10357 }
10358
10359 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10360                                    u16 *val)
10361 {
10362         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10363         struct hwrm_port_phy_mdio_read_input req = {0};
10364         int rc;
10365
10366         if (bp->hwrm_spec_code < 0x10a00)
10367                 return -EOPNOTSUPP;
10368
10369         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10370         req.port_id = cpu_to_le16(bp->pf.port_id);
10371         req.phy_addr = phy_addr;
10372         req.reg_addr = cpu_to_le16(reg & 0x1f);
10373         if (mdio_phy_id_is_c45(phy_addr)) {
10374                 req.cl45_mdio = 1;
10375                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10376                 req.dev_addr = mdio_phy_id_devad(phy_addr);
10377                 req.reg_addr = cpu_to_le16(reg);
10378         }
10379
10380         mutex_lock(&bp->hwrm_cmd_lock);
10381         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10382         if (!rc)
10383                 *val = le16_to_cpu(resp->reg_data);
10384         mutex_unlock(&bp->hwrm_cmd_lock);
10385         return rc;
10386 }
10387
10388 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10389                                     u16 val)
10390 {
10391         struct hwrm_port_phy_mdio_write_input req = {0};
10392
10393         if (bp->hwrm_spec_code < 0x10a00)
10394                 return -EOPNOTSUPP;
10395
10396         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10397         req.port_id = cpu_to_le16(bp->pf.port_id);
10398         req.phy_addr = phy_addr;
10399         req.reg_addr = cpu_to_le16(reg & 0x1f);
10400         if (mdio_phy_id_is_c45(phy_addr)) {
10401                 req.cl45_mdio = 1;
10402                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10403                 req.dev_addr = mdio_phy_id_devad(phy_addr);
10404                 req.reg_addr = cpu_to_le16(reg);
10405         }
10406         req.reg_data = cpu_to_le16(val);
10407
10408         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10409 }
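/* Encoding sketch (illustrative): for clause-45 access, the ioctl phy_id
 * packs both addresses (see linux/mdio.h):
 *
 *	phy_id = MDIO_PHY_ID_C45 | (prtad << 5) | devad
 *
 * so mdio_phy_id_prtad()/mdio_phy_id_devad() above simply unpack the
 * port and device addresses, and the full 16-bit register number is
 * used instead of the 5-bit clause-22 field masked with 0x1f.
 */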
10410
10411 /* rtnl_lock held */
10412 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10413 {
10414         struct mii_ioctl_data *mdio = if_mii(ifr);
10415         struct bnxt *bp = netdev_priv(dev);
10416         int rc;
10417
10418         switch (cmd) {
10419         case SIOCGMIIPHY:
10420                 mdio->phy_id = bp->link_info.phy_addr;
10421
10422                 fallthrough;
10423         case SIOCGMIIREG: {
10424                 u16 mii_regval = 0;
10425
10426                 if (!netif_running(dev))
10427                         return -EAGAIN;
10428
10429                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10430                                              &mii_regval);
10431                 mdio->val_out = mii_regval;
10432                 return rc;
10433         }
10434
10435         case SIOCSMIIREG:
10436                 if (!netif_running(dev))
10437                         return -EAGAIN;
10438
10439                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10440                                                 mdio->val_in);
10441
10442         case SIOCSHWTSTAMP:
10443                 return bnxt_hwtstamp_set(dev, ifr);
10444
10445         case SIOCGHWTSTAMP:
10446                 return bnxt_hwtstamp_get(dev, ifr);
10447
10448         default:
10449                 /* do nothing */
10450                 break;
10451         }
10452         return -EOPNOTSUPP;
10453 }
10454
10455 static void bnxt_get_ring_stats(struct bnxt *bp,
10456                                 struct rtnl_link_stats64 *stats)
10457 {
10458         int i;
10459
10460         for (i = 0; i < bp->cp_nr_rings; i++) {
10461                 struct bnxt_napi *bnapi = bp->bnapi[i];
10462                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10463                 u64 *sw = cpr->stats.sw_stats;
10464
10465                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10466                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10467                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10468
10469                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10470                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10471                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10472
10473                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10474                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10475                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10476
10477                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10478                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10479                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10480
10481                 stats->rx_missed_errors +=
10482                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10483
10484                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10485
10486                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10487         }
10488 }
10489
10490 static void bnxt_add_prev_stats(struct bnxt *bp,
10491                                 struct rtnl_link_stats64 *stats)
10492 {
10493         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10494
10495         stats->rx_packets += prev_stats->rx_packets;
10496         stats->tx_packets += prev_stats->tx_packets;
10497         stats->rx_bytes += prev_stats->rx_bytes;
10498         stats->tx_bytes += prev_stats->tx_bytes;
10499         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10500         stats->multicast += prev_stats->multicast;
10501         stats->tx_dropped += prev_stats->tx_dropped;
10502 }
10503
10504 static void
10505 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10506 {
10507         struct bnxt *bp = netdev_priv(dev);
10508
10509         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10510         /* Make sure bnxt_close_nic() sees that we are reading stats before
10511          * we check the BNXT_STATE_OPEN flag.
10512          */
10513         smp_mb__after_atomic();
10514         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10515                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10516                 *stats = bp->net_stats_prev;
10517                 return;
10518         }
10519
10520         bnxt_get_ring_stats(bp, stats);
10521         bnxt_add_prev_stats(bp, stats);
10522
10523         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10524                 u64 *rx = bp->port_stats.sw_stats;
10525                 u64 *tx = bp->port_stats.sw_stats +
10526                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10527
10528                 stats->rx_crc_errors =
10529                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10530                 stats->rx_frame_errors =
10531                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10532                 stats->rx_length_errors =
10533                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10534                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10535                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10536                 stats->rx_errors =
10537                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10538                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10539                 stats->collisions =
10540                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10541                 stats->tx_fifo_errors =
10542                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10543                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10544         }
10545         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10546 }
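/* Ordering sketch (illustrative): the READ_STATS bit pairs with the
 * close path in __bnxt_close_nic():
 *
 *	reader (here)                    closer
 *	set READ_STATS                   clear STATE_OPEN
 *	smp_mb__after_atomic()           smp_mb__after_atomic()
 *	test STATE_OPEN                  while (bnxt_drv_busy()) msleep(20)
 *
 * Either the reader sees STATE_OPEN cleared and returns cached stats,
 * or the closer sees READ_STATS set and waits, so rings are never freed
 * while their counters are being read.
 */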
10547
10548 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10549 {
10550         struct net_device *dev = bp->dev;
10551         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10552         struct netdev_hw_addr *ha;
10553         u8 *haddr;
10554         int mc_count = 0;
10555         bool update = false;
10556         int off = 0;
10557
10558         netdev_for_each_mc_addr(ha, dev) {
10559                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10560                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10561                         vnic->mc_list_count = 0;
10562                         return false;
10563                 }
10564                 haddr = ha->addr;
10565                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10566                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10567                         update = true;
10568                 }
10569                 off += ETH_ALEN;
10570                 mc_count++;
10571         }
10572         if (mc_count)
10573                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10574
10575         if (mc_count != vnic->mc_list_count) {
10576                 vnic->mc_list_count = mc_count;
10577                 update = true;
10578         }
10579         return update;
10580 }
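/* Overflow sketch (illustrative): exact multicast filtering is bounded
 * by BNXT_MAX_MC_ADDRS, beyond which the VNIC falls back to promiscuous
 * multicast:
 *
 *	mc addrs <= max  -> MASK_MCAST with an exact vnic->mc_list
 *	mc addrs  > max  -> MASK_ALL_MCAST, mc_list_count forced to 0
 */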
10581
10582 static bool bnxt_uc_list_updated(struct bnxt *bp)
10583 {
10584         struct net_device *dev = bp->dev;
10585         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10586         struct netdev_hw_addr *ha;
10587         int off = 0;
10588
10589         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10590                 return true;
10591
10592         netdev_for_each_uc_addr(ha, dev) {
10593                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10594                         return true;
10595
10596                 off += ETH_ALEN;
10597         }
10598         return false;
10599 }
10600
10601 static void bnxt_set_rx_mode(struct net_device *dev)
10602 {
10603         struct bnxt *bp = netdev_priv(dev);
10604         struct bnxt_vnic_info *vnic;
10605         bool mc_update = false;
10606         bool uc_update;
10607         u32 mask;
10608
10609         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10610                 return;
10611
10612         vnic = &bp->vnic_info[0];
10613         mask = vnic->rx_mask;
10614         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10615                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10616                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10617                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10618
10619         if (dev->flags & IFF_PROMISC)
10620                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10621
10622         uc_update = bnxt_uc_list_updated(bp);
10623
10624         if (dev->flags & IFF_BROADCAST)
10625                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10626         if (dev->flags & IFF_ALLMULTI) {
10627                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10628                 vnic->mc_list_count = 0;
10629         } else {
10630                 mc_update = bnxt_mc_list_updated(bp, &mask);
10631         }
10632
10633         if (mask != vnic->rx_mask || uc_update || mc_update) {
10634                 vnic->rx_mask = mask;
10635
10636                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10637                 bnxt_queue_sp_work(bp);
10638         }
10639 }
10640
10641 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10642 {
10643         struct net_device *dev = bp->dev;
10644         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10645         struct netdev_hw_addr *ha;
10646         int i, off = 0, rc;
10647         bool uc_update;
10648
10649         netif_addr_lock_bh(dev);
10650         uc_update = bnxt_uc_list_updated(bp);
10651         netif_addr_unlock_bh(dev);
10652
10653         if (!uc_update)
10654                 goto skip_uc;
10655
10656         mutex_lock(&bp->hwrm_cmd_lock);
10657         for (i = 1; i < vnic->uc_filter_count; i++) {
10658                 struct hwrm_cfa_l2_filter_free_input req = {0};
10659
10660                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10661                                        -1);
10662
10663                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10664
10665                 rc = _hwrm_send_message(bp, &req, sizeof(req),
10666                                         HWRM_CMD_TIMEOUT);
10667         }
10668         mutex_unlock(&bp->hwrm_cmd_lock);
10669
10670         vnic->uc_filter_count = 1;
10671
10672         netif_addr_lock_bh(dev);
10673         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10674                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10675         } else {
10676                 netdev_for_each_uc_addr(ha, dev) {
10677                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10678                         off += ETH_ALEN;
10679                         vnic->uc_filter_count++;
10680                 }
10681         }
10682         netif_addr_unlock_bh(dev);
10683
10684         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10685                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10686                 if (rc) {
10687                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10688                                    rc);
10689                         vnic->uc_filter_count = i;
10690                         return rc;
10691                 }
10692         }
10693
10694 skip_uc:
10695         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10696             !bnxt_promisc_ok(bp))
10697                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10698         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10699         if (rc && vnic->mc_list_count) {
10700                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10701                             rc);
10702                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10703                 vnic->mc_list_count = 0;
10704                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10705         }
10706         if (rc)
10707                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10708                            rc);
10709
10710         return rc;
10711 }
10712
10713 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10714 {
10715 #ifdef CONFIG_BNXT_SRIOV
10716         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10717                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10718
10719                 /* No minimum rings were provisioned by the PF.  Don't
10720                  * reserve rings by default when device is down.
10721                  */
10722                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10723                         return true;
10724
10725                 if (!netif_running(bp->dev))
10726                         return false;
10727         }
10728 #endif
10729         return true;
10730 }
10731
10732 /* If the chip and firmware support RFS */
10733 static bool bnxt_rfs_supported(struct bnxt *bp)
10734 {
10735         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10736                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10737                         return true;
10738                 return false;
10739         }
10740         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10741                 return true;
10742         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10743                 return true;
10744         return false;
10745 }
10746
10747 /* If runtime conditions support RFS */
10748 static bool bnxt_rfs_capable(struct bnxt *bp)
10749 {
10750 #ifdef CONFIG_RFS_ACCEL
10751         int vnics, max_vnics, max_rss_ctxs;
10752
10753         if (bp->flags & BNXT_FLAG_CHIP_P5)
10754                 return bnxt_rfs_supported(bp);
10755         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10756                 return false;
10757
10758         vnics = 1 + bp->rx_nr_rings;
10759         max_vnics = bnxt_get_max_func_vnics(bp);
10760         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10761
10762         /* RSS contexts not a limiting factor */
10763         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10764                 max_rss_ctxs = max_vnics;
10765         if (vnics > max_vnics || vnics > max_rss_ctxs) {
10766                 if (bp->rx_nr_rings > 1)
10767                         netdev_warn(bp->dev,
10768                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10769                                     min(max_rss_ctxs - 1, max_vnics - 1));
10770                 return false;
10771         }
10772
10773         if (!BNXT_NEW_RM(bp))
10774                 return true;
10775
10776         if (vnics == bp->hw_resc.resv_vnics)
10777                 return true;
10778
10779         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10780         if (vnics <= bp->hw_resc.resv_vnics)
10781                 return true;
10782
10783         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10784         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10785         return false;
10786 #else
10787         return false;
10788 #endif
10789 }
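/* Sizing sketch (illustrative): aRFS needs one VNIC per RX ring plus the
 * default VNIC, so with 8 RX rings:
 *
 *	vnics = 1 + 8 = 9
 *
 * If the function only has 8 VNICs or 8 RSS contexts available, the
 * check above fails and the warning suggests at most
 * min(8 - 1, 8 - 1) = 7 rings.
 */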
10790
10791 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10792                                            netdev_features_t features)
10793 {
10794         struct bnxt *bp = netdev_priv(dev);
10795         netdev_features_t vlan_features;
10796
10797         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10798                 features &= ~NETIF_F_NTUPLE;
10799
10800         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10801                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10802
10803         if (!(features & NETIF_F_GRO))
10804                 features &= ~NETIF_F_GRO_HW;
10805
10806         if (features & NETIF_F_GRO_HW)
10807                 features &= ~NETIF_F_LRO;
10808
10809         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10810          * turned on or off together.
10811          */
10812         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10813         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10814                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10815                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10816                 else if (vlan_features)
10817                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10818         }
10819 #ifdef CONFIG_BNXT_SRIOV
10820         if (BNXT_VF(bp) && bp->vf.vlan)
10821                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10822 #endif
10823         return features;
10824 }
10825
10826 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10827 {
10828         struct bnxt *bp = netdev_priv(dev);
10829         u32 flags = bp->flags;
10830         u32 changes;
10831         int rc = 0;
10832         bool re_init = false;
10833         bool update_tpa = false;
10834
10835         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10836         if (features & NETIF_F_GRO_HW)
10837                 flags |= BNXT_FLAG_GRO;
10838         else if (features & NETIF_F_LRO)
10839                 flags |= BNXT_FLAG_LRO;
10840
10841         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10842                 flags &= ~BNXT_FLAG_TPA;
10843
10844         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10845                 flags |= BNXT_FLAG_STRIP_VLAN;
10846
10847         if (features & NETIF_F_NTUPLE)
10848                 flags |= BNXT_FLAG_RFS;
10849
10850         changes = flags ^ bp->flags;
10851         if (changes & BNXT_FLAG_TPA) {
10852                 update_tpa = true;
10853                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10854                     (flags & BNXT_FLAG_TPA) == 0 ||
10855                     (bp->flags & BNXT_FLAG_CHIP_P5))
10856                         re_init = true;
10857         }
10858
10859         if (changes & ~BNXT_FLAG_TPA)
10860                 re_init = true;
10861
10862         if (flags != bp->flags) {
10863                 u32 old_flags = bp->flags;
10864
10865                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10866                         bp->flags = flags;
10867                         if (update_tpa)
10868                                 bnxt_set_ring_params(bp);
10869                         return rc;
10870                 }
10871
10872                 if (re_init) {
10873                         bnxt_close_nic(bp, false, false);
10874                         bp->flags = flags;
10875                         if (update_tpa)
10876                                 bnxt_set_ring_params(bp);
10877
10878                         return bnxt_open_nic(bp, false, false);
10879                 }
10880                 if (update_tpa) {
10881                         bp->flags = flags;
10882                         rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
10885                         if (rc)
10886                                 bp->flags = old_flags;
10887                 }
10888         }
10889         return rc;
10890 }
10891
10892 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10893                               u8 **nextp)
10894 {
10895         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10896         int hdr_count = 0;
10897         u8 *nexthdr;
10898         int start;
10899
10900         /* Check that there are at most 3 IPv6 extension headers, no
10901          * fragment header, and each is <= 64 bytes.
10902          */
10903         start = nw_off + sizeof(*ip6h);
10904         nexthdr = &ip6h->nexthdr;
10905         while (ipv6_ext_hdr(*nexthdr)) {
10906                 struct ipv6_opt_hdr *hp;
10907                 int hdrlen;
10908
10909                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10910                     *nexthdr == NEXTHDR_FRAGMENT)
10911                         return false;
10912                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10913                                           skb_headlen(skb), NULL);
10914                 if (!hp)
10915                         return false;
10916                 if (*nexthdr == NEXTHDR_AUTH)
10917                         hdrlen = ipv6_authlen(hp);
10918                 else
10919                         hdrlen = ipv6_optlen(hp);
10920
10921                 if (hdrlen > 64)
10922                         return false;
10923                 nexthdr = &hp->nexthdr;
10924                 start += hdrlen;
10925                 hdr_count++;
10926         }
10927         if (nextp) {
10928                 /* Caller will check inner protocol */
10929                 if (skb->encapsulation) {
10930                         *nextp = nexthdr;
10931                         return true;
10932                 }
10933                 *nextp = NULL;
10934         }
10935         /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
10936         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
10937 }
10938
10939 /* For UDP, we can only handle one VXLAN port and one Geneve port. */
10940 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
10941 {
10942         struct udphdr *uh = udp_hdr(skb);
10943         __be16 udp_port = uh->dest;
10944
10945         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
10946                 return false;
10947         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
10948                 struct ethhdr *eh = inner_eth_hdr(skb);
10949
10950                 switch (eh->h_proto) {
10951                 case htons(ETH_P_IP):
10952                         return true;
10953                 case htons(ETH_P_IPV6):
10954                         return bnxt_exthdr_check(bp, skb,
10955                                                  skb_inner_network_offset(skb),
10956                                                  NULL);
10957                 }
10958         }
10959         return false;
10960 }
10961
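/* Decide whether TX offloads can be kept for a tunneled packet based on
 * the outer L4 protocol: UDP tunnels are limited to the configured
 * VXLAN/Geneve ports, GRE is supported for inner IPv4 (inner IPv6 falls
 * through to the extension header check), and IP-in-IP is always
 * supported.
 */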
10962 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
10963 {
10964         switch (l4_proto) {
10965         case IPPROTO_UDP:
10966                 return bnxt_udp_tunl_check(bp, skb);
10967         case IPPROTO_IPIP:
10968                 return true;
10969         case IPPROTO_GRE: {
10970                 switch (skb->inner_protocol) {
10971                 default:
10972                         return false;
10973                 case htons(ETH_P_IP):
10974                         return true;
10975                 case htons(ETH_P_IPV6):
10976                         fallthrough;
10977                 }
10978         }
10979         case IPPROTO_IPV6:
10980                 /* Check ext headers of inner ipv6 */
10981                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
10982                                          NULL);
10983         }
10984         return false;
10985 }
10986
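/* .ndo_features_check handler.  Clears the checksum and GSO offload bits
 * for packets the hardware cannot handle: unsupported tunnel types and
 * IPv6 packets whose extension header chain fails bnxt_exthdr_check().
 */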
10987 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
10988                                              struct net_device *dev,
10989                                              netdev_features_t features)
10990 {
10991         struct bnxt *bp = netdev_priv(dev);
10992         u8 *l4_proto;
10993
10994         features = vlan_features_check(skb, features);
10995         switch (vlan_get_protocol(skb)) {
10996         case htons(ETH_P_IP):
10997                 if (!skb->encapsulation)
10998                         return features;
10999                 l4_proto = &ip_hdr(skb)->protocol;
11000                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11001                         return features;
11002                 break;
11003         case htons(ETH_P_IPV6):
11004                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11005                                        &l4_proto))
11006                         break;
11007                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11008                         return features;
11009                 break;
11010         }
11011         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11012 }
11013
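/* Read @num_words 32-bit words of firmware register space starting at
 * @reg_off (CHIMP register view) using HWRM_DBG_READ_DIRECT.  Firmware
 * DMAs the data into a coherent buffer, which is then byte-swapped into
 * @reg_buf.  A hypothetical usage sketch (not an actual caller in this
 * driver):
 *
 *	u32 buf[4];
 *
 *	if (!bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 4, buf))
 *		netdev_info(bp->dev, "dbg regs: %08x %08x %08x %08x\n",
 *			    buf[0], buf[1], buf[2], buf[3]);
 */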
11014 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11015                          u32 *reg_buf)
11016 {
11017         struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
11018         struct hwrm_dbg_read_direct_input req = {0};
11019         __le32 *dbg_reg_buf;
11020         dma_addr_t mapping;
11021         int rc, i;
11022
11023         dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
11024                                          &mapping, GFP_KERNEL);
11025         if (!dbg_reg_buf)
11026                 return -ENOMEM;
11027         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
11028         req.host_dest_addr = cpu_to_le64(mapping);
11029         req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11030         req.read_len32 = cpu_to_le32(num_words);
11031         mutex_lock(&bp->hwrm_cmd_lock);
11032         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11033         if (rc || resp->error_code) {
11034                 rc = -EIO;
11035                 goto dbg_rd_reg_exit;
11036         }
11037         for (i = 0; i < num_words; i++)
11038                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11039
11040 dbg_rd_reg_exit:
11041         mutex_unlock(&bp->hwrm_cmd_lock);
11042         dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
11043         return rc;
11044 }
11045
11046 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11047                                        u32 ring_id, u32 *prod, u32 *cons)
11048 {
11049         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
11050         struct hwrm_dbg_ring_info_get_input req = {0};
11051         int rc;
11052
11053         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
11054         req.ring_type = ring_type;
11055         req.fw_ring_id = cpu_to_le32(ring_id);
11056         mutex_lock(&bp->hwrm_cmd_lock);
11057         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11058         if (!rc) {
11059                 *prod = le32_to_cpu(resp->producer_index);
11060                 *cons = le32_to_cpu(resp->consumer_index);
11061         }
11062         mutex_unlock(&bp->hwrm_cmd_lock);
11063         return rc;
11064 }
11065
11066 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11067 {
11068         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11069         int i = bnapi->index;
11070
11071         if (!txr)
11072                 return;
11073
11074         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11075                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11076                     txr->tx_cons);
11077 }
11078
11079 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11080 {
11081         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11082         int i = bnapi->index;
11083
11084         if (!rxr)
11085                 return;
11086
11087         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11088                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11089                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11090                     rxr->rx_sw_agg_prod);
11091 }
11092
11093 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11094 {
11095         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11096         int i = bnapi->index;
11097
11098         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11099                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11100 }
11101
11102 static void bnxt_dbg_dump_states(struct bnxt *bp)
11103 {
11104         int i;
11105         struct bnxt_napi *bnapi;
11106
11107         for (i = 0; i < bp->cp_nr_rings; i++) {
11108                 bnapi = bp->bnapi[i];
11109                 if (netif_msg_drv(bp)) {
11110                         bnxt_dump_tx_sw_state(bnapi);
11111                         bnxt_dump_rx_sw_state(bnapi);
11112                         bnxt_dump_cp_sw_state(bnapi);
11113                 }
11114         }
11115 }
11116
11117 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11118 {
11119         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11120         struct hwrm_ring_reset_input req = {0};
11121         struct bnxt_napi *bnapi = rxr->bnapi;
11122         struct bnxt_cp_ring_info *cpr;
11123         u16 cp_ring_id;
11124
11125         cpr = &bnapi->cp_ring;
11126         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11127         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11128         req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11129         req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11130         return hwrm_send_message_silent(bp, &req, sizeof(req),
11131                                         HWRM_CMD_TIMEOUT);
11132 }
11133
11134 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11135 {
11136         if (!silent)
11137                 bnxt_dbg_dump_states(bp);
11138         if (netif_running(bp->dev)) {
11139                 int rc;
11140
11141                 if (silent) {
11142                         bnxt_close_nic(bp, false, false);
11143                         bnxt_open_nic(bp, false, false);
11144                 } else {
11145                         bnxt_ulp_stop(bp);
11146                         bnxt_close_nic(bp, true, false);
11147                         rc = bnxt_open_nic(bp, true, false);
11148                         bnxt_ulp_start(bp, rc);
11149                 }
11150         }
11151 }
11152
11153 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11154 {
11155         struct bnxt *bp = netdev_priv(dev);
11156
11157         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11158         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11159         bnxt_queue_sp_work(bp);
11160 }
11161
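/* Called from bnxt_timer() to monitor firmware health.  Once the
 * per-check down-counter expires, a stalled heartbeat register or an
 * unexpected change of the reset counter schedules a firmware exception
 * event (BNXT_FW_EXCEPTION_SP_EVENT) for the slow-path task.
 */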
11162 static void bnxt_fw_health_check(struct bnxt *bp)
11163 {
11164         struct bnxt_fw_health *fw_health = bp->fw_health;
11165         u32 val;
11166
11167         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11168                 return;
11169
11170         if (fw_health->tmr_counter) {
11171                 fw_health->tmr_counter--;
11172                 return;
11173         }
11174
11175         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11176         if (val == fw_health->last_fw_heartbeat)
11177                 goto fw_reset;
11178
11179         fw_health->last_fw_heartbeat = val;
11180
11181         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11182         if (val != fw_health->last_fw_reset_cnt)
11183                 goto fw_reset;
11184
11185         fw_health->tmr_counter = fw_health->tmr_multiplier;
11186         return;
11187
11188 fw_reset:
11189         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11190         bnxt_queue_sp_work(bp);
11191 }
11192
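/* Periodic timer, re-armed every bp->current_interval jiffies while the
 * device is open.  It does no real work itself: it checks firmware
 * health and schedules slow-path events (stats refresh, TC flower stats,
 * ntuple filter aging, PHY setting retries) for bnxt_sp_task().
 */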
11193 static void bnxt_timer(struct timer_list *t)
11194 {
11195         struct bnxt *bp = from_timer(bp, t, timer);
11196         struct net_device *dev = bp->dev;
11197
11198         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11199                 return;
11200
11201         if (atomic_read(&bp->intr_sem) != 0)
11202                 goto bnxt_restart_timer;
11203
11204         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11205                 bnxt_fw_health_check(bp);
11206
11207         if (bp->link_info.link_up && bp->stats_coal_ticks) {
11208                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11209                 bnxt_queue_sp_work(bp);
11210         }
11211
11212         if (bnxt_tc_flower_enabled(bp)) {
11213                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11214                 bnxt_queue_sp_work(bp);
11215         }
11216
11217 #ifdef CONFIG_RFS_ACCEL
11218         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11219                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11220                 bnxt_queue_sp_work(bp);
11221         }
11222 #endif /* CONFIG_RFS_ACCEL */
11223
11224         if (bp->link_info.phy_retry) {
11225                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11226                         bp->link_info.phy_retry = false;
11227                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11228                 } else {
11229                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11230                         bnxt_queue_sp_work(bp);
11231                 }
11232         }
11233
11234         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11235             netif_carrier_ok(dev)) {
11236                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11237                 bnxt_queue_sp_work(bp);
11238         }
11239 bnxt_restart_timer:
11240         mod_timer(&bp->timer, jiffies + bp->current_interval);
11241 }
11242
11243 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11244 {
11245         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11246          * set.  If the device is being closed, bnxt_close() may be holding
11247          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So
11248          * we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
11249          */
11250         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11251         rtnl_lock();
11252 }
11253
11254 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11255 {
11256         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11257         rtnl_unlock();
11258 }
11259
11260 /* Only called from bnxt_sp_task() */
11261 static void bnxt_reset(struct bnxt *bp, bool silent)
11262 {
11263         bnxt_rtnl_lock_sp(bp);
11264         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11265                 bnxt_reset_task(bp, silent);
11266         bnxt_rtnl_unlock_sp(bp);
11267 }
11268
11269 /* Only called from bnxt_sp_task() */
11270 static void bnxt_rx_ring_reset(struct bnxt *bp)
11271 {
11272         int i;
11273
11274         bnxt_rtnl_lock_sp(bp);
11275         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11276                 bnxt_rtnl_unlock_sp(bp);
11277                 return;
11278         }
11279         /* Disable and flush TPA before resetting the RX ring */
11280         if (bp->flags & BNXT_FLAG_TPA)
11281                 bnxt_set_tpa(bp, false);
11282         for (i = 0; i < bp->rx_nr_rings; i++) {
11283                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11284                 struct bnxt_cp_ring_info *cpr;
11285                 int rc;
11286
11287                 if (!rxr->bnapi->in_reset)
11288                         continue;
11289
11290                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11291                 if (rc) {
11292                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11293                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11294                         else
11295                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11296                                             rc);
11297                         bnxt_reset_task(bp, true);
11298                         break;
11299                 }
11300                 bnxt_free_one_rx_ring_skbs(bp, i);
11301                 rxr->rx_prod = 0;
11302                 rxr->rx_agg_prod = 0;
11303                 rxr->rx_sw_agg_prod = 0;
11304                 rxr->rx_next_cons = 0;
11305                 rxr->bnapi->in_reset = false;
11306                 bnxt_alloc_one_rx_ring(bp, i);
11307                 cpr = &rxr->bnapi->cp_ring;
11308                 cpr->sw_stats.rx.rx_resets++;
11309                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11310                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11311                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11312         }
11313         if (bp->flags & BNXT_FLAG_TPA)
11314                 bnxt_set_tpa(bp, true);
11315         bnxt_rtnl_unlock_sp(bp);
11316 }
11317
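/* Quiesce and close the device in preparation for a firmware reset.  In
 * the fatal-error case the hardware may already be inaccessible, so TX,
 * NAPI, interrupts and the PCI device are shut down up front to prevent
 * stray DMA into memory that is about to be freed.
 */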
11318 static void bnxt_fw_reset_close(struct bnxt *bp)
11319 {
11320         bnxt_ulp_stop(bp);
11321         /* When firmware is in fatal state, quiesce device and disable
11322          * bus master to prevent any potential bad DMAs before freeing
11323          * kernel memory.
11324          */
11325         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11326                 u16 val = 0;
11327
11328                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11329                 if (val == 0xffff)
11330                         bp->fw_reset_min_dsecs = 0;
11331                 bnxt_tx_disable(bp);
11332                 bnxt_disable_napi(bp);
11333                 bnxt_disable_int_sync(bp);
11334                 bnxt_free_irq(bp);
11335                 bnxt_clear_int_mode(bp);
11336                 pci_disable_device(bp->pdev);
11337         }
11338         __bnxt_close_nic(bp, true, false);
11339         bnxt_vf_reps_free(bp);
11340         bnxt_clear_int_mode(bp);
11341         bnxt_hwrm_func_drv_unrgtr(bp);
11342         if (pci_is_enabled(bp->pdev))
11343                 pci_disable_device(bp->pdev);
11344         bnxt_free_ctx_mem(bp);
11345         kfree(bp->ctx);
11346         bp->ctx = NULL;
11347 }
11348
11349 static bool is_bnxt_fw_ok(struct bnxt *bp)
11350 {
11351         struct bnxt_fw_health *fw_health = bp->fw_health;
11352         bool no_heartbeat = false, has_reset = false;
11353         u32 val;
11354
11355         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11356         if (val == fw_health->last_fw_heartbeat)
11357                 no_heartbeat = true;
11358
11359         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11360         if (val != fw_health->last_fw_reset_cnt)
11361                 has_reset = true;
11362
11363         if (!no_heartbeat && has_reset)
11364                 return true;
11365
11366         return false;
11367 }
11368
11369 /* rtnl_lock is acquired before calling this function */
11370 static void bnxt_force_fw_reset(struct bnxt *bp)
11371 {
11372         struct bnxt_fw_health *fw_health = bp->fw_health;
11373         u32 wait_dsecs;
11374
11375         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11376             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11377                 return;
11378
11379         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11380         bnxt_fw_reset_close(bp);
11381         wait_dsecs = fw_health->master_func_wait_dsecs;
11382         if (fw_health->master) {
11383                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11384                         wait_dsecs = 0;
11385                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11386         } else {
11387                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11388                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11389                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11390         }
11391
11392         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11393         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11394         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11395 }
11396
11397 void bnxt_fw_exception(struct bnxt *bp)
11398 {
11399         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11400         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11401         bnxt_rtnl_lock_sp(bp);
11402         bnxt_force_fw_reset(bp);
11403         bnxt_rtnl_unlock_sp(bp);
11404 }
11405
11406 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11407  * < 0 on error.
11408  */
11409 static int bnxt_get_registered_vfs(struct bnxt *bp)
11410 {
11411 #ifdef CONFIG_BNXT_SRIOV
11412         int rc;
11413
11414         if (!BNXT_PF(bp))
11415                 return 0;
11416
11417         rc = bnxt_hwrm_func_qcfg(bp);
11418         if (rc) {
11419                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11420                 return rc;
11421         }
11422         if (bp->pf.registered_vfs)
11423                 return bp->pf.registered_vfs;
11424         if (bp->sriov_cfg)
11425                 return 1;
11426 #endif
11427         return 0;
11428 }
11429
11430 void bnxt_fw_reset(struct bnxt *bp)
11431 {
11432         bnxt_rtnl_lock_sp(bp);
11433         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11434             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11435                 int n = 0, tmo;
11436
11437                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11438                 if (bp->pf.active_vfs &&
11439                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11440                         n = bnxt_get_registered_vfs(bp);
11441                 if (n < 0) {
11442                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11443                                    n);
11444                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11445                         dev_close(bp->dev);
11446                         goto fw_reset_exit;
11447                 } else if (n > 0) {
11448                         u16 vf_tmo_dsecs = n * 10;
11449
11450                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11451                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11452                         bp->fw_reset_state =
11453                                 BNXT_FW_RESET_STATE_POLL_VF;
11454                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11455                         goto fw_reset_exit;
11456                 }
11457                 bnxt_fw_reset_close(bp);
11458                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11459                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11460                         tmo = HZ / 10;
11461                 } else {
11462                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11463                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11464                 }
11465                 bnxt_queue_fw_reset_work(bp, tmo);
11466         }
11467 fw_reset_exit:
11468         bnxt_rtnl_unlock_sp(bp);
11469 }
11470
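/* P5 chips only: look for completion rings that have pending work but
 * whose raw consumer index has not advanced since the last check, which
 * indicates a missed interrupt.  The firmware ring state is read as a
 * debug snapshot and the event is counted in sw_stats.cmn.missed_irqs.
 */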
11471 static void bnxt_chk_missed_irq(struct bnxt *bp)
11472 {
11473         int i;
11474
11475         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11476                 return;
11477
11478         for (i = 0; i < bp->cp_nr_rings; i++) {
11479                 struct bnxt_napi *bnapi = bp->bnapi[i];
11480                 struct bnxt_cp_ring_info *cpr;
11481                 u32 fw_ring_id;
11482                 int j;
11483
11484                 if (!bnapi)
11485                         continue;
11486
11487                 cpr = &bnapi->cp_ring;
11488                 for (j = 0; j < 2; j++) {
11489                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11490                         u32 val[2];
11491
11492                         if (!cpr2 || cpr2->has_more_work ||
11493                             !bnxt_has_work(bp, cpr2))
11494                                 continue;
11495
11496                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11497                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11498                                 continue;
11499                         }
11500                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11501                         bnxt_dbg_hwrm_ring_info_get(bp,
11502                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11503                                 fw_ring_id, &val[0], &val[1]);
11504                         cpr->sw_stats.cmn.missed_irqs++;
11505                 }
11506         }
11507 }
11508
11509 static void bnxt_cfg_ntp_filters(struct bnxt *);
11510
11511 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11512 {
11513         struct bnxt_link_info *link_info = &bp->link_info;
11514
11515         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11516                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11517                 if (bp->hwrm_spec_code >= 0x10201) {
11518                         if (link_info->auto_pause_setting &
11519                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11520                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11521                 } else {
11522                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11523                 }
11524                 link_info->advertising = link_info->auto_link_speeds;
11525                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11526         } else {
11527                 link_info->req_link_speed = link_info->force_link_speed;
11528                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11529                 if (link_info->force_pam4_link_speed) {
11530                         link_info->req_link_speed =
11531                                 link_info->force_pam4_link_speed;
11532                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11533                 }
11534                 link_info->req_duplex = link_info->duplex_setting;
11535         }
11536         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11537                 link_info->req_flow_ctrl =
11538                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11539         else
11540                 link_info->req_flow_ctrl = link_info->force_pause_setting;
11541 }
11542
11543 static void bnxt_fw_echo_reply(struct bnxt *bp)
11544 {
11545         struct bnxt_fw_health *fw_health = bp->fw_health;
11546         struct hwrm_func_echo_response_input req = {0};
11547
11548         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11549         req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11550         req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11551         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11552 }
11553
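/* Slow-path workqueue handler.  Services the events flagged in
 * bp->sp_event while BNXT_STATE_IN_SP_TASK is set.  The reset handlers
 * at the end temporarily drop that state bit in order to take the rtnl
 * lock, so they must remain the last events processed.
 */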
11554 static void bnxt_sp_task(struct work_struct *work)
11555 {
11556         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11557
11558         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11559         smp_mb__after_atomic();
11560         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11561                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11562                 return;
11563         }
11564
11565         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11566                 bnxt_cfg_rx_mode(bp);
11567
11568         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11569                 bnxt_cfg_ntp_filters(bp);
11570         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11571                 bnxt_hwrm_exec_fwd_req(bp);
11572         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11573                 bnxt_hwrm_port_qstats(bp, 0);
11574                 bnxt_hwrm_port_qstats_ext(bp, 0);
11575                 bnxt_accumulate_all_stats(bp);
11576         }
11577
11578         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11579                 int rc;
11580
11581                 mutex_lock(&bp->link_lock);
11582                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11583                                        &bp->sp_event))
11584                         bnxt_hwrm_phy_qcaps(bp);
11585
11586                 rc = bnxt_update_link(bp, true);
11587                 if (rc)
11588                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11589                                    rc);
11590
11591                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11592                                        &bp->sp_event))
11593                         bnxt_init_ethtool_link_settings(bp);
11594                 mutex_unlock(&bp->link_lock);
11595         }
11596         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11597                 int rc;
11598
11599                 mutex_lock(&bp->link_lock);
11600                 rc = bnxt_update_phy_setting(bp);
11601                 mutex_unlock(&bp->link_lock);
11602                 if (rc) {
11603                         netdev_warn(bp->dev, "update phy settings retry failed\n");
11604                 } else {
11605                         bp->link_info.phy_retry = false;
11606                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
11607                 }
11608         }
11609         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11610                 mutex_lock(&bp->link_lock);
11611                 bnxt_get_port_module_status(bp);
11612                 mutex_unlock(&bp->link_lock);
11613         }
11614
11615         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11616                 bnxt_tc_flow_stats_work(bp);
11617
11618         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11619                 bnxt_chk_missed_irq(bp);
11620
11621         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11622                 bnxt_fw_echo_reply(bp);
11623
11624         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
11625          * must be the last functions to be called before exiting.
11626          */
11627         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11628                 bnxt_reset(bp, false);
11629
11630         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11631                 bnxt_reset(bp, true);
11632
11633         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11634                 bnxt_rx_ring_reset(bp);
11635
11636         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11637                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11638
11639         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11640                 if (!is_bnxt_fw_ok(bp))
11641                         bnxt_devlink_health_report(bp,
11642                                                    BNXT_FW_EXCEPTION_SP_EVENT);
11643         }
11644
11645         smp_mb__before_atomic();
11646         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11647 }
11648
11649 /* Under rtnl_lock */
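/* Worked example with illustrative numbers: tx = 4, rx = 4, sh = true,
 * tcs = 2, tx_xdp = 0, aggregation rings on, RFS off.  Then
 * tx_rings_needed = 4 * 2 = 8, rx_rings = 4 << 1 = 8 (aggregation
 * doubles the RX ring count), cp = max(8, 4) = 8, and vnics = 1.
 */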
11650 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11651                      int tx_xdp)
11652 {
11653         int max_rx, max_tx, tx_sets = 1;
11654         int tx_rings_needed, stats;
11655         int rx_rings = rx;
11656         int cp, vnics, rc;
11657
11658         if (tcs)
11659                 tx_sets = tcs;
11660
11661         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11662         if (rc)
11663                 return rc;
11664
11665         if (max_rx < rx)
11666                 return -ENOMEM;
11667
11668         tx_rings_needed = tx * tx_sets + tx_xdp;
11669         if (max_tx < tx_rings_needed)
11670                 return -ENOMEM;
11671
11672         vnics = 1;
11673         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11674                 vnics += rx_rings;
11675
11676         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11677                 rx_rings <<= 1;
11678         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11679         stats = cp;
11680         if (BNXT_NEW_RM(bp)) {
11681                 cp += bnxt_get_ulp_msix_num(bp);
11682                 stats += bnxt_get_ulp_stat_ctxs(bp);
11683         }
11684         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11685                                      stats, vnics);
11686 }
11687
11688 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11689 {
11690         if (bp->bar2) {
11691                 pci_iounmap(pdev, bp->bar2);
11692                 bp->bar2 = NULL;
11693         }
11694
11695         if (bp->bar1) {
11696                 pci_iounmap(pdev, bp->bar1);
11697                 bp->bar1 = NULL;
11698         }
11699
11700         if (bp->bar0) {
11701                 pci_iounmap(pdev, bp->bar0);
11702                 bp->bar0 = NULL;
11703         }
11704 }
11705
11706 static void bnxt_cleanup_pci(struct bnxt *bp)
11707 {
11708         bnxt_unmap_bars(bp, bp->pdev);
11709         pci_release_regions(bp->pdev);
11710         if (pci_is_enabled(bp->pdev))
11711                 pci_disable_device(bp->pdev);
11712 }
11713
11714 static void bnxt_init_dflt_coal(struct bnxt *bp)
11715 {
11716         struct bnxt_coal *coal;
11717
11718         /* Tick values in microseconds.
11719          * 1 coal_buf x bufs_per_record = 1 completion record.
11720          */
11721         coal = &bp->rx_coal;
11722         coal->coal_ticks = 10;
11723         coal->coal_bufs = 30;
11724         coal->coal_ticks_irq = 1;
11725         coal->coal_bufs_irq = 2;
11726         coal->idle_thresh = 50;
11727         coal->bufs_per_record = 2;
11728         coal->budget = 64;              /* NAPI budget */
11729
11730         coal = &bp->tx_coal;
11731         coal->coal_ticks = 28;
11732         coal->coal_bufs = 30;
11733         coal->coal_ticks_irq = 2;
11734         coal->coal_bufs_irq = 2;
11735         coal->bufs_per_record = 1;
11736
11737         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11738 }
11739
11740 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11741 {
11742         int rc;
11743
11744         bp->fw_cap = 0;
11745         rc = bnxt_hwrm_ver_get(bp);
11746         bnxt_try_map_fw_health_reg(bp);
11747         if (rc) {
11748                 rc = bnxt_try_recover_fw(bp);
11749                 if (rc)
11750                         return rc;
11751                 rc = bnxt_hwrm_ver_get(bp);
11752                 if (rc)
11753                         return rc;
11754         }
11755
11756         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11757                 rc = bnxt_alloc_kong_hwrm_resources(bp);
11758                 if (rc)
11759                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11760         }
11761
11762         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11763             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11764                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11765                 if (rc)
11766                         return rc;
11767         }
11768         bnxt_nvm_cfg_ver_get(bp);
11769
11770         rc = bnxt_hwrm_func_reset(bp);
11771         if (rc)
11772                 return -ENODEV;
11773
11774         bnxt_hwrm_fw_set_time(bp);
11775         return 0;
11776 }
11777
11778 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11779 {
11780         int rc;
11781
11782         /* Get the MAX capabilities for this function */
11783         rc = bnxt_hwrm_func_qcaps(bp);
11784         if (rc) {
11785                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11786                            rc);
11787                 return -ENODEV;
11788         }
11789
11790         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11791         if (rc)
11792                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11793                             rc);
11794
11795         if (bnxt_alloc_fw_health(bp)) {
11796                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11797         } else {
11798                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11799                 if (rc)
11800                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11801                                     rc);
11802         }
11803
11804         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11805         if (rc)
11806                 return -ENODEV;
11807
11808         bnxt_hwrm_func_qcfg(bp);
11809         bnxt_hwrm_vnic_qcaps(bp);
11810         bnxt_hwrm_port_led_qcaps(bp);
11811         bnxt_ethtool_init(bp);
11812         bnxt_dcb_init(bp);
11813         return 0;
11814 }
11815
11816 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11817 {
11818         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11819         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11820                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11821                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11822                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11823         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11824                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11825                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11826                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11827         }
11828 }
11829
11830 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11831 {
11832         struct net_device *dev = bp->dev;
11833
11834         dev->hw_features &= ~NETIF_F_NTUPLE;
11835         dev->features &= ~NETIF_F_NTUPLE;
11836         bp->flags &= ~BNXT_FLAG_RFS;
11837         if (bnxt_rfs_supported(bp)) {
11838                 dev->hw_features |= NETIF_F_NTUPLE;
11839                 if (bnxt_rfs_capable(bp)) {
11840                         bp->flags |= BNXT_FLAG_RFS;
11841                         dev->features |= NETIF_F_NTUPLE;
11842                 }
11843         }
11844 }
11845
11846 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11847 {
11848         struct pci_dev *pdev = bp->pdev;
11849
11850         bnxt_set_dflt_rss_hash_type(bp);
11851         bnxt_set_dflt_rfs(bp);
11852
11853         bnxt_get_wol_settings(bp);
11854         if (bp->flags & BNXT_FLAG_WOL_CAP)
11855                 device_set_wakeup_enable(&pdev->dev, bp->wol);
11856         else
11857                 device_set_wakeup_capable(&pdev->dev, false);
11858
11859         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11860         bnxt_hwrm_coal_params_qcaps(bp);
11861 }
11862
11863 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11864
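/* One-shot firmware initialization, also re-run after a firmware reset.
 * Phase 1 establishes HWRM communication and resets the function, phase
 * 2 queries capabilities and registers the driver with firmware, and
 * phase 3 applies default RSS, RFS and wake-on-LAN settings.
 */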
11865 static int bnxt_fw_init_one(struct bnxt *bp)
11866 {
11867         int rc;
11868
11869         rc = bnxt_fw_init_one_p1(bp);
11870         if (rc) {
11871                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11872                 return rc;
11873         }
11874         rc = bnxt_fw_init_one_p2(bp);
11875         if (rc) {
11876                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11877                 return rc;
11878         }
11879         rc = bnxt_probe_phy(bp, false);
11880         if (rc)
11881                 return rc;
11882         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11883         if (rc)
11884                 return rc;
11885
11886         /* In case fw capabilities have changed, destroy the unneeded
11887          * reporters and create newly capable ones.
11888          */
11889         bnxt_dl_fw_reporters_destroy(bp, false);
11890         bnxt_dl_fw_reporters_create(bp);
11891         bnxt_fw_init_one_p3(bp);
11892         return 0;
11893 }
11894
11895 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11896 {
11897         struct bnxt_fw_health *fw_health = bp->fw_health;
11898         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11899         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11900         u32 reg_type, reg_off, delay_msecs;
11901
11902         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11903         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11904         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11905         switch (reg_type) {
11906         case BNXT_FW_HEALTH_REG_TYPE_CFG:
11907                 pci_write_config_dword(bp->pdev, reg_off, val);
11908                 break;
11909         case BNXT_FW_HEALTH_REG_TYPE_GRC:
11910                 writel(reg_off & BNXT_GRC_BASE_MASK,
11911                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11912                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11913                 fallthrough;
11914         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11915                 writel(val, bp->bar0 + reg_off);
11916                 break;
11917         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11918                 writel(val, bp->bar1 + reg_off);
11919                 break;
11920         }
11921         if (delay_msecs) {
11922                 pci_read_config_dword(bp->pdev, 0, &val);
11923                 msleep(delay_msecs);
11924         }
11925 }
11926
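/* Perform the actual firmware reset using whichever mechanism the
 * firmware advertises: an OP-TEE assisted reset, a host-driven register
 * write sequence, or a graceful HWRM_FW_RESET request to the chip's
 * co-processor.
 */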
11927 static void bnxt_reset_all(struct bnxt *bp)
11928 {
11929         struct bnxt_fw_health *fw_health = bp->fw_health;
11930         int i, rc;
11931
11932         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11933                 bnxt_fw_reset_via_optee(bp);
11934                 bp->fw_reset_timestamp = jiffies;
11935                 return;
11936         }
11937
11938         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11939                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11940                         bnxt_fw_reset_writel(bp, i);
11941         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11942                 struct hwrm_fw_reset_input req = {0};
11943
11944                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11945                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11946                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11947                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11948                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11949                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11950                 if (rc && rc != -ENODEV)
11951                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11952         }
11953         bp->fw_reset_timestamp = jiffies;
11954 }
11955
11956 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
11957 {
11958         return time_after(jiffies, bp->fw_reset_timestamp +
11959                           (bp->fw_reset_max_dsecs * HZ / 10));
11960 }
11961
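/* Delayed-work state machine that drives a firmware reset:
 * POLL_VF (wait for VFs to unregister) -> close the device -> RESET_FW
 * or POLL_FW_DOWN (trigger the reset or wait for firmware to go down)
 * -> ENABLE_DEV (re-enable the PCI device) -> POLL_FW (wait for
 * firmware to come back) -> OPENING (reopen the NIC under rtnl).
 */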
11962 static void bnxt_fw_reset_task(struct work_struct *work)
11963 {
11964         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11965         int rc;
11966
11967         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11968                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11969                 return;
11970         }
11971
11972         switch (bp->fw_reset_state) {
11973         case BNXT_FW_RESET_STATE_POLL_VF: {
11974                 int n = bnxt_get_registered_vfs(bp);
11975                 int tmo;
11976
11977                 if (n < 0) {
11978                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11979                                    n, jiffies_to_msecs(jiffies -
11980                                    bp->fw_reset_timestamp));
11981                         goto fw_reset_abort;
11982                 } else if (n > 0) {
11983                         if (bnxt_fw_reset_timeout(bp)) {
11984                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11985                                 bp->fw_reset_state = 0;
11986                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11987                                            n);
11988                                 return;
11989                         }
11990                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11991                         return;
11992                 }
11993                 bp->fw_reset_timestamp = jiffies;
11994                 rtnl_lock();
11995                 bnxt_fw_reset_close(bp);
11996                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11997                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11998                         tmo = HZ / 10;
11999                 } else {
12000                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12001                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12002                 }
12003                 rtnl_unlock();
12004                 bnxt_queue_fw_reset_work(bp, tmo);
12005                 return;
12006         }
12007         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12008                 u32 val;
12009
12010                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12011                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12012                     !bnxt_fw_reset_timeout(bp)) {
12013                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12014                         return;
12015                 }
12016
12017                 if (!bp->fw_health->master) {
12018                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12019
12020                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12021                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12022                         return;
12023                 }
12024                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12025         }
12026                 fallthrough;
12027         case BNXT_FW_RESET_STATE_RESET_FW:
12028                 bnxt_reset_all(bp);
12029                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12030                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12031                 return;
12032         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12033                 bnxt_inv_fw_health_reg(bp);
12034                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12035                     !bp->fw_reset_min_dsecs) {
12036                         u16 val;
12037
12038                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12039                         if (val == 0xffff) {
12040                                 if (bnxt_fw_reset_timeout(bp)) {
12041                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12042                                         goto fw_reset_abort;
12043                                 }
12044                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12045                                 return;
12046                         }
12047                 }
12048                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12049                 if (pci_enable_device(bp->pdev)) {
12050                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12051                         goto fw_reset_abort;
12052                 }
12053                 pci_set_master(bp->pdev);
12054                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12055                 fallthrough;
12056         case BNXT_FW_RESET_STATE_POLL_FW:
12057                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12058                 rc = __bnxt_hwrm_ver_get(bp, true);
12059                 if (rc) {
12060                         if (bnxt_fw_reset_timeout(bp)) {
12061                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12062                                 goto fw_reset_abort_status;
12063                         }
12064                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12065                         return;
12066                 }
12067                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12068                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12069                 fallthrough;
12070         case BNXT_FW_RESET_STATE_OPENING:
12071                 while (!rtnl_trylock()) {
12072                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12073                         return;
12074                 }
12075                 rc = bnxt_open(bp->dev);
12076                 if (rc) {
12077                         netdev_err(bp->dev, "bnxt_open() failed\n");
12078                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12079                         dev_close(bp->dev);
12080                 }
12081
12082                 bp->fw_reset_state = 0;
12083                 /* Make sure fw_reset_state is 0 before clearing the flag */
12084                 smp_mb__before_atomic();
12085                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12086                 bnxt_ulp_start(bp, rc);
12087                 if (!rc)
12088                         bnxt_reenable_sriov(bp);
12089                 bnxt_vf_reps_alloc(bp);
12090                 bnxt_vf_reps_open(bp);
12091                 bnxt_dl_health_recovery_done(bp);
12092                 bnxt_dl_health_status_update(bp, true);
12093                 rtnl_unlock();
12094                 break;
12095         }
12096         return;
12097
12098 fw_reset_abort_status:
12099         if (bp->fw_health->status_reliable ||
12100             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12101                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12102
12103                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12104         }
12105 fw_reset_abort:
12106         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12107         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
12108                 bnxt_dl_health_status_update(bp, false);
12109         bp->fw_reset_state = 0;
12110         rtnl_lock();
12111         dev_close(bp->dev);
12112         rtnl_unlock();
12113 }
12114
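/* Early PCI bring-up: enable the device, claim its regions, set the DMA
 * mask, map BAR0 and BAR4, and initialize the slow-path work items,
 * default ring sizes, coalescing parameters and the periodic timer.
 * The doorbell BAR is mapped later, once its size is known.
 */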
12115 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12116 {
12117         int rc;
12118         struct bnxt *bp = netdev_priv(dev);
12119
12120         SET_NETDEV_DEV(dev, &pdev->dev);
12121
12122         /* enable device (incl. PCI PM wakeup), and bus-mastering */
12123         rc = pci_enable_device(pdev);
12124         if (rc) {
12125                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12126                 goto init_err;
12127         }
12128
12129         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12130                 dev_err(&pdev->dev,
12131                         "Cannot find PCI device base address, aborting\n");
12132                 rc = -ENODEV;
12133                 goto init_err_disable;
12134         }
12135
12136         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12137         if (rc) {
12138                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12139                 goto init_err_disable;
12140         }
12141
12142         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12143             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12144                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12145                 rc = -EIO;
12146                 goto init_err_release;
12147         }
12148
12149         pci_set_master(pdev);
12150
12151         bp->dev = dev;
12152         bp->pdev = pdev;
12153
12154         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12155          * determines the BAR size.
12156          */
12157         bp->bar0 = pci_ioremap_bar(pdev, 0);
12158         if (!bp->bar0) {
12159                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12160                 rc = -ENOMEM;
12161                 goto init_err_release;
12162         }
12163
12164         bp->bar2 = pci_ioremap_bar(pdev, 4);
12165         if (!bp->bar2) {
12166                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12167                 rc = -ENOMEM;
12168                 goto init_err_release;
12169         }
12170
12171         pci_enable_pcie_error_reporting(pdev);
12172
12173         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12174         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12175
12176         spin_lock_init(&bp->ntp_fltr_lock);
12177 #if BITS_PER_LONG == 32
12178         spin_lock_init(&bp->db_lock);
12179 #endif
12180
12181         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12182         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12183
12184         bnxt_init_dflt_coal(bp);
12185
12186         timer_setup(&bp->timer, bnxt_timer, 0);
12187         bp->current_interval = BNXT_TIMER_INTERVAL;
12188
12189         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12190         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12191
12192         clear_bit(BNXT_STATE_OPEN, &bp->state);
12193         return 0;
12194
12195 init_err_release:
12196         bnxt_unmap_bars(bp, pdev);
12197         pci_release_regions(pdev);
12198
12199 init_err_disable:
12200         pci_disable_device(pdev);
12201
12202 init_err:
12203         return rc;
12204 }
12205
12206 /* rtnl_lock held */
12207 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12208 {
12209         struct sockaddr *addr = p;
12210         struct bnxt *bp = netdev_priv(dev);
12211         int rc = 0;
12212
12213         if (!is_valid_ether_addr(addr->sa_data))
12214                 return -EADDRNOTAVAIL;
12215
12216         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12217                 return 0;
12218
12219         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12220         if (rc)
12221                 return rc;
12222
12223         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12224         if (netif_running(dev)) {
12225                 bnxt_close_nic(bp, false, false);
12226                 rc = bnxt_open_nic(bp, false, false);
12227         }
12228
12229         return rc;
12230 }
12231
12232 /* rtnl_lock held */
12233 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12234 {
12235         struct bnxt *bp = netdev_priv(dev);
12236
12237         if (netif_running(dev))
12238                 bnxt_close_nic(bp, true, false);
12239
12240         dev->mtu = new_mtu;
12241         bnxt_set_ring_params(bp);
12242
12243         if (netif_running(dev))
12244                 return bnxt_open_nic(bp, true, false);
12245
12246         return 0;
12247 }
12248
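/* Configure hardware traffic classes (mqprio).  Validates the TC count
 * against bp->max_tc and the available rings, then re-partitions the TX
 * rings per TC, closing and reopening the NIC if it is running.
 */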
12249 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12250 {
12251         struct bnxt *bp = netdev_priv(dev);
12252         bool sh = false;
12253         int rc;
12254
12255         if (tc > bp->max_tc) {
12256                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12257                            tc, bp->max_tc);
12258                 return -EINVAL;
12259         }
12260
12261         if (netdev_get_num_tc(dev) == tc)
12262                 return 0;
12263
12264         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12265                 sh = true;
12266
12267         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12268                               sh, tc, bp->tx_nr_rings_xdp);
12269         if (rc)
12270                 return rc;
12271
12272         /* Need to close the device and do hw resource re-allocation */
12273         if (netif_running(bp->dev))
12274                 bnxt_close_nic(bp, true, false);
12275
12276         if (tc) {
12277                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12278                 netdev_set_num_tc(dev, tc);
12279         } else {
12280                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12281                 netdev_reset_tc(dev);
12282         }
12283         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12284         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12285                                bp->tx_nr_rings + bp->rx_nr_rings;
12286
12287         if (netif_running(bp->dev))
12288                 return bnxt_open_nic(bp, true, false);
12289
12290         return 0;
12291 }
12292
12293 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12294                                   void *cb_priv)
12295 {
12296         struct bnxt *bp = cb_priv;
12297
12298         if (!bnxt_tc_flower_enabled(bp) ||
12299             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12300                 return -EOPNOTSUPP;
12301
12302         switch (type) {
12303         case TC_SETUP_CLSFLOWER:
12304                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12305         default:
12306                 return -EOPNOTSUPP;
12307         }
12308 }
12309
12310 LIST_HEAD(bnxt_block_cb_list);
12311
12312 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12313                          void *type_data)
12314 {
12315         struct bnxt *bp = netdev_priv(dev);
12316
12317         switch (type) {
12318         case TC_SETUP_BLOCK:
12319                 return flow_block_cb_setup_simple(type_data,
12320                                                   &bnxt_block_cb_list,
12321                                                   bnxt_setup_tc_block_cb,
12322                                                   bp, bp, true);
12323         case TC_SETUP_QDISC_MQPRIO: {
12324                 struct tc_mqprio_qopt *mqprio = type_data;
12325
12326                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12327
12328                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12329         }
12330         default:
12331                 return -EOPNOTSUPP;
12332         }
12333 }
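
/* Illustrative sketch, not driver code: TC_SETUP_QDISC_MQPRIO reaches
 * this handler when userspace installs an mqprio root qdisc with
 * hardware offload, e.g. (interface name hypothetical):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 1 2 3 0 1 2 3 hw 1
 *
 * which ends up in bnxt_setup_mq_tc(dev, 4) above and resizes the TX
 * rings to tx_nr_rings_per_tc * 4.
 */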
12334
12335 #ifdef CONFIG_RFS_ACCEL
12336 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12337                             struct bnxt_ntuple_filter *f2)
12338 {
12339         struct flow_keys *keys1 = &f1->fkeys;
12340         struct flow_keys *keys2 = &f2->fkeys;
12341
12342         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12343             keys1->basic.ip_proto != keys2->basic.ip_proto)
12344                 return false;
12345
12346         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12347                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12348                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12349                         return false;
12350         } else {
12351                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12352                            sizeof(keys1->addrs.v6addrs.src)) ||
12353                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12354                            sizeof(keys1->addrs.v6addrs.dst)))
12355                         return false;
12356         }
12357
12358         if (keys1->ports.ports == keys2->ports.ports &&
12359             keys1->control.flags == keys2->control.flags &&
12360             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12361             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12362                 return true;
12363
12364         return false;
12365 }
12366
12367 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12368                               u16 rxq_index, u32 flow_id)
12369 {
12370         struct bnxt *bp = netdev_priv(dev);
12371         struct bnxt_ntuple_filter *fltr, *new_fltr;
12372         struct flow_keys *fkeys;
12373         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12374         int rc = 0, idx, bit_id, l2_idx = 0;
12375         struct hlist_head *head;
12376         u32 flags;
12377
12378         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12379                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12380                 int off = 0, j;
12381
12382                 netif_addr_lock_bh(dev);
12383                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12384                         if (ether_addr_equal(eth->h_dest,
12385                                              vnic->uc_list + off)) {
12386                                 l2_idx = j + 1;
12387                                 break;
12388                         }
12389                 }
12390                 netif_addr_unlock_bh(dev);
12391                 if (!l2_idx)
12392                         return -EINVAL;
12393         }
12394         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12395         if (!new_fltr)
12396                 return -ENOMEM;
12397
12398         fkeys = &new_fltr->fkeys;
12399         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12400                 rc = -EPROTONOSUPPORT;
12401                 goto err_free;
12402         }
12403
12404         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12405              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12406             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12407              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12408                 rc = -EPROTONOSUPPORT;
12409                 goto err_free;
12410         }
12411         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12412             bp->hwrm_spec_code < 0x10601) {
12413                 rc = -EPROTONOSUPPORT;
12414                 goto err_free;
12415         }
12416         flags = fkeys->control.flags;
12417         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12418              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12419                 rc = -EPROTONOSUPPORT;
12420                 goto err_free;
12421         }
12422
12423         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12424         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12425
12426         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12427         head = &bp->ntp_fltr_hash_tbl[idx];
12428         rcu_read_lock();
12429         hlist_for_each_entry_rcu(fltr, head, hash) {
12430                 if (bnxt_fltr_match(fltr, new_fltr)) {
12431                         rcu_read_unlock();
12432                         rc = 0;
12433                         goto err_free;
12434                 }
12435         }
12436         rcu_read_unlock();
12437
12438         spin_lock_bh(&bp->ntp_fltr_lock);
12439         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12440                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12441         if (bit_id < 0) {
12442                 spin_unlock_bh(&bp->ntp_fltr_lock);
12443                 rc = -ENOMEM;
12444                 goto err_free;
12445         }
12446
12447         new_fltr->sw_id = (u16)bit_id;
12448         new_fltr->flow_id = flow_id;
12449         new_fltr->l2_fltr_idx = l2_idx;
12450         new_fltr->rxq = rxq_index;
12451         hlist_add_head_rcu(&new_fltr->hash, head);
12452         bp->ntp_fltr_count++;
12453         spin_unlock_bh(&bp->ntp_fltr_lock);
12454
12455         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12456         bnxt_queue_sp_work(bp);
12457
12458         return new_fltr->sw_id;
12459
12460 err_free:
12461         kfree(new_fltr);
12462         return rc;
12463 }
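
/* Illustrative note, not driver code: the sw_id returned above is the
 * filter ID that the RPS core hands back through rps_may_expire_flow()
 * in bnxt_cfg_ntp_filters() below, closing the aRFS add/expire loop:
 *
 *	ndo_rx_flow_steer(dev, skb, rxq, flow_id)	// returns sw_id
 *	rps_may_expire_flow(dev, rxq, flow_id, sw_id)	// true when stale
 */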
12464
12465 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12466 {
12467         int i;
12468
12469         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12470                 struct hlist_head *head;
12471                 struct hlist_node *tmp;
12472                 struct bnxt_ntuple_filter *fltr;
12473                 int rc;
12474
12475                 head = &bp->ntp_fltr_hash_tbl[i];
12476                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12477                         bool del = false;
12478
12479                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12480                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12481                                                         fltr->flow_id,
12482                                                         fltr->sw_id)) {
12483                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
12484                                                                          fltr);
12485                                         del = true;
12486                                 }
12487                         } else {
12488                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12489                                                                        fltr);
12490                                 if (rc)
12491                                         del = true;
12492                                 else
12493                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
12494                         }
12495
12496                         if (del) {
12497                                 spin_lock_bh(&bp->ntp_fltr_lock);
12498                                 hlist_del_rcu(&fltr->hash);
12499                                 bp->ntp_fltr_count--;
12500                                 spin_unlock_bh(&bp->ntp_fltr_lock);
12501                                 synchronize_rcu();
12502                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12503                                 kfree(fltr);
12504                         }
12505                 }
12506         }
12507         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12508                 netdev_info(bp->dev, "Received PF driver unload event!\n");
12509 }
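
/* Illustrative sketch, not driver code: the deletion path above is the
 * standard RCU removal sequence -- unlink under the lock, wait out the
 * readers, then free:
 *
 *	spin_lock_bh(&lock);
 *	hlist_del_rcu(&obj->hash);
 *	spin_unlock_bh(&lock);
 *	synchronize_rcu();	// no rcu_read_lock() reader sees obj now
 *	kfree(obj);
 */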
12510
12511 #else
12512
12513 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12514 {
12515 }
12516
12517 #endif /* CONFIG_RFS_ACCEL */
12518
12519 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12520 {
12521         struct bnxt *bp = netdev_priv(netdev);
12522         struct udp_tunnel_info ti;
12523         unsigned int cmd;
12524
12525         udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12526         if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12527                 bp->vxlan_port = ti.port;
12528                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12529         } else {
12530                 bp->nge_port = ti.port;
12531                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12532         }
12533
12534         if (ti.port)
12535                 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12536
12537         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12538 }
12539
12540 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12541         .sync_table     = bnxt_udp_tunnel_sync,
12542         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12543                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12544         .tables         = {
12545                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
12546                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12547         },
12548 };
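
/* Illustrative note, not driver code: with UDP_TUNNEL_NIC_INFO_OPEN_ONLY
 * the udp_tunnel_nic core replays these tables only while the device is
 * up, calling bnxt_udp_tunnel_sync() whenever a table changes.  A port
 * of 0 from udp_tunnel_nic_get_port() means the entry was cleared,
 * which is why the handler above frees the destination port in that
 * case.
 */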
12549
12550 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12551                                struct net_device *dev, u32 filter_mask,
12552                                int nlflags)
12553 {
12554         struct bnxt *bp = netdev_priv(dev);
12555
12556         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12557                                        nlflags, filter_mask, NULL);
12558 }
12559
12560 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12561                                u16 flags, struct netlink_ext_ack *extack)
12562 {
12563         struct bnxt *bp = netdev_priv(dev);
12564         struct nlattr *attr, *br_spec;
12565         int rem, rc = 0;
12566
12567         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12568                 return -EOPNOTSUPP;
12569
12570         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12571         if (!br_spec)
12572                 return -EINVAL;
12573
12574         nla_for_each_nested(attr, br_spec, rem) {
12575                 u16 mode;
12576
12577                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12578                         continue;
12579
12580                 if (nla_len(attr) < sizeof(mode))
12581                         return -EINVAL;
12582
12583                 mode = nla_get_u16(attr);
12584                 if (mode == bp->br_mode)
12585                         break;
12586
12587                 rc = bnxt_hwrm_set_br_mode(bp, mode);
12588                 if (!rc)
12589                         bp->br_mode = mode;
12590                 break;
12591         }
12592         return rc;
12593 }
12594
12595 int bnxt_get_port_parent_id(struct net_device *dev,
12596                             struct netdev_phys_item_id *ppid)
12597 {
12598         struct bnxt *bp = netdev_priv(dev);
12599
12600         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12601                 return -EOPNOTSUPP;
12602
12603         /* The PF and its VF-reps only support the switchdev framework */
12604         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12605                 return -EOPNOTSUPP;
12606
12607         ppid->id_len = sizeof(bp->dsn);
12608         memcpy(ppid->id, bp->dsn, ppid->id_len);
12609
12610         return 0;
12611 }
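
/* Illustrative note, not driver code: the DSN copied into ppid here is
 * what userspace sees as the port's phys_switch_id (e.g. via
 * "ip -d link show"), letting tools group the PF and its VF
 * representors into one logical switch.
 */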
12612
12613 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12614 {
12615         struct bnxt *bp = netdev_priv(dev);
12616
12617         return &bp->dl_port;
12618 }
12619
12620 static const struct net_device_ops bnxt_netdev_ops = {
12621         .ndo_open               = bnxt_open,
12622         .ndo_start_xmit         = bnxt_start_xmit,
12623         .ndo_stop               = bnxt_close,
12624         .ndo_get_stats64        = bnxt_get_stats64,
12625         .ndo_set_rx_mode        = bnxt_set_rx_mode,
12626         .ndo_do_ioctl           = bnxt_ioctl,
12627         .ndo_validate_addr      = eth_validate_addr,
12628         .ndo_set_mac_address    = bnxt_change_mac_addr,
12629         .ndo_change_mtu         = bnxt_change_mtu,
12630         .ndo_fix_features       = bnxt_fix_features,
12631         .ndo_set_features       = bnxt_set_features,
12632         .ndo_features_check     = bnxt_features_check,
12633         .ndo_tx_timeout         = bnxt_tx_timeout,
12634 #ifdef CONFIG_BNXT_SRIOV
12635         .ndo_get_vf_config      = bnxt_get_vf_config,
12636         .ndo_set_vf_mac         = bnxt_set_vf_mac,
12637         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
12638         .ndo_set_vf_rate        = bnxt_set_vf_bw,
12639         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
12640         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
12641         .ndo_set_vf_trust       = bnxt_set_vf_trust,
12642 #endif
12643         .ndo_setup_tc           = bnxt_setup_tc,
12644 #ifdef CONFIG_RFS_ACCEL
12645         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
12646 #endif
12647         .ndo_bpf                = bnxt_xdp,
12648         .ndo_xdp_xmit           = bnxt_xdp_xmit,
12649         .ndo_bridge_getlink     = bnxt_bridge_getlink,
12650         .ndo_bridge_setlink     = bnxt_bridge_setlink,
12651         .ndo_get_devlink_port   = bnxt_get_devlink_port,
12652 };
12653
12654 static void bnxt_remove_one(struct pci_dev *pdev)
12655 {
12656         struct net_device *dev = pci_get_drvdata(pdev);
12657         struct bnxt *bp = netdev_priv(dev);
12658
12659         if (BNXT_PF(bp))
12660                 bnxt_sriov_disable(bp);
12661
12662         if (BNXT_PF(bp))
12663                 devlink_port_type_clear(&bp->dl_port);
12664
12665         bnxt_ptp_clear(bp);
12666         pci_disable_pcie_error_reporting(pdev);
12667         unregister_netdev(dev);
12668         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12669         /* Flush any pending tasks */
12670         cancel_work_sync(&bp->sp_task);
12671         cancel_delayed_work_sync(&bp->fw_reset_task);
12672         bp->sp_event = 0;
12673
12674         bnxt_dl_fw_reporters_destroy(bp, true);
12675         bnxt_dl_unregister(bp);
12676         bnxt_shutdown_tc(bp);
12677
12678         bnxt_clear_int_mode(bp);
12679         bnxt_hwrm_func_drv_unrgtr(bp);
12680         bnxt_free_hwrm_resources(bp);
12681         bnxt_free_hwrm_short_cmd_req(bp);
12682         bnxt_ethtool_free(bp);
12683         bnxt_dcb_free(bp);
12684         kfree(bp->edev);
12685         bp->edev = NULL;
12686         kfree(bp->ptp_cfg);
12687         bp->ptp_cfg = NULL;
12688         kfree(bp->fw_health);
12689         bp->fw_health = NULL;
12690         bnxt_cleanup_pci(bp);
12691         bnxt_free_ctx_mem(bp);
12692         kfree(bp->ctx);
12693         bp->ctx = NULL;
12694         kfree(bp->rss_indir_tbl);
12695         bp->rss_indir_tbl = NULL;
12696         bnxt_free_port_stats(bp);
12697         free_netdev(dev);
12698 }
12699
12700 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12701 {
12702         int rc = 0;
12703         struct bnxt_link_info *link_info = &bp->link_info;
12704
12705         bp->phy_flags = 0;
12706         rc = bnxt_hwrm_phy_qcaps(bp);
12707         if (rc) {
12708                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12709                            rc);
12710                 return rc;
12711         }
12712         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12713                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12714         else
12715                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12716         if (!fw_dflt)
12717                 return 0;
12718
12719         rc = bnxt_update_link(bp, false);
12720         if (rc) {
12721                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12722                            rc);
12723                 return rc;
12724         }
12725
12726         /* Older firmware does not have supported_auto_speeds, so assume
12727          * that all supported speeds can be autonegotiated.
12728          */
12729         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12730                 link_info->support_auto_speeds = link_info->support_speeds;
12731
12732         bnxt_init_ethtool_link_settings(bp);
12733         return 0;
12734 }
12735
12736 static int bnxt_get_max_irq(struct pci_dev *pdev)
12737 {
12738         u16 ctrl;
12739
12740         if (!pdev->msix_cap)
12741                 return 1;
12742
12743         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12744         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12745 }
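
/* Illustrative sketch, not driver code: the MSI-X Message Control
 * register encodes Table Size as N - 1, hence the "+ 1" above:
 *
 *	u16 ctrl = 0x001f;				// raw register value
 *	int nvec = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;	// 32 vectors
 */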
12746
12747 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12748                                 int *max_cp)
12749 {
12750         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12751         int max_ring_grps = 0, max_irq;
12752
12753         *max_tx = hw_resc->max_tx_rings;
12754         *max_rx = hw_resc->max_rx_rings;
12755         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12756         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12757                         bnxt_get_ulp_msix_num(bp),
12758                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12759         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12760                 *max_cp = min_t(int, *max_cp, max_irq);
12761         max_ring_grps = hw_resc->max_hw_ring_grps;
12762         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12763                 *max_cp -= 1;
12764                 *max_rx -= 2;
12765         }
12766         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12767                 *max_rx >>= 1;
12768         if (bp->flags & BNXT_FLAG_CHIP_P5) {
12769                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12770                 /* On P5 chips, the max_cp output param should be the number of available NQs */
12771                 *max_cp = max_irq;
12772         }
12773         *max_rx = min_t(int, *max_rx, max_ring_grps);
12774 }
12775
12776 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12777 {
12778         int rx, tx, cp;
12779
12780         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12781         *max_rx = rx;
12782         *max_tx = tx;
12783         if (!rx || !tx || !cp)
12784                 return -ENOMEM;
12785
12786         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12787 }
12788
12789 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12790                                bool shared)
12791 {
12792         int rc;
12793
12794         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12795         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12796                 /* Not enough rings, try disabling agg rings. */
12797                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12798                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12799                 if (rc) {
12800                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
12801                         bp->flags |= BNXT_FLAG_AGG_RINGS;
12802                         return rc;
12803                 }
12804                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12805                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12806                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12807                 bnxt_set_ring_params(bp);
12808         }
12809
12810         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12811                 int max_cp, max_stat, max_irq;
12812
12813                 /* Reserve minimum resources for RoCE */
12814                 max_cp = bnxt_get_max_func_cp_rings(bp);
12815                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12816                 max_irq = bnxt_get_max_func_irqs(bp);
12817                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12818                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12819                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12820                         return 0;
12821
12822                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12823                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12824                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12825                 max_cp = min_t(int, max_cp, max_irq);
12826                 max_cp = min_t(int, max_cp, max_stat);
12827                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12828                 if (rc)
12829                         rc = 0;
12830         }
12831         return rc;
12832 }
12833
12834 /* In the initial default shared ring setting, each shared ring must have an
12835  * RX/TX ring pair.
12836  */
12837 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12838 {
12839         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12840         bp->rx_nr_rings = bp->cp_nr_rings;
12841         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12842         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12843 }
12844
12845 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12846 {
12847         int dflt_rings, max_rx_rings, max_tx_rings, rc;
12848
12849         if (!bnxt_can_reserve_rings(bp))
12850                 return 0;
12851
12852         if (sh)
12853                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12854         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12855         /* Reduce default rings on multi-port cards so that the total number
12856          * of default rings does not exceed the CPU count.
12857          */
12858         if (bp->port_count > 1) {
12859                 int max_rings =
12860                         max_t(int, num_online_cpus() / bp->port_count, 1);
12861
12862                 dflt_rings = min_t(int, dflt_rings, max_rings);
12863         }
12864         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12865         if (rc)
12866                 return rc;
12867         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12868         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12869         if (sh)
12870                 bnxt_trim_dflt_sh_rings(bp);
12871         else
12872                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12873         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12874
12875         rc = __bnxt_reserve_rings(bp);
12876         if (rc)
12877                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12878         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12879         if (sh)
12880                 bnxt_trim_dflt_sh_rings(bp);
12881
12882         /* Rings may have been trimmed; re-reserve the trimmed rings. */
12883         if (bnxt_need_reserve_rings(bp)) {
12884                 rc = __bnxt_reserve_rings(bp);
12885                 if (rc)
12886                         netdev_warn(bp->dev, "2nd ring reservation failed.\n");
12887                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12888         }
12889         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12890                 bp->rx_nr_rings++;
12891                 bp->cp_nr_rings++;
12892         }
12893         if (rc) {
12894                 bp->tx_nr_rings = 0;
12895                 bp->rx_nr_rings = 0;
12896         }
12897         return rc;
12898 }
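
/* Illustrative example, not driver code: on a 2-port adapter in a host
 * with 8 online CPUs, the multi-port cap above limits each port to
 * num_online_cpus() / bp->port_count = 4 default rings, so the two
 * ports together do not outnumber the CPUs:
 *
 *	dflt_rings = min_t(int, dflt_rings, max_t(int, 8 / 2, 1));  // 4
 */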
12899
12900 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12901 {
12902         int rc;
12903
12904         if (bp->tx_nr_rings)
12905                 return 0;
12906
12907         bnxt_ulp_irq_stop(bp);
12908         bnxt_clear_int_mode(bp);
12909         rc = bnxt_set_dflt_rings(bp, true);
12910         if (rc) {
12911                 netdev_err(bp->dev, "Not enough rings available.\n");
12912                 goto init_dflt_ring_err;
12913         }
12914         rc = bnxt_init_int_mode(bp);
12915         if (rc)
12916                 goto init_dflt_ring_err;
12917
12918         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12919         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12920                 bp->flags |= BNXT_FLAG_RFS;
12921                 bp->dev->features |= NETIF_F_NTUPLE;
12922         }
12923 init_dflt_ring_err:
12924         bnxt_ulp_irq_restart(bp, rc);
12925         return rc;
12926 }
12927
12928 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12929 {
12930         int rc;
12931
12932         ASSERT_RTNL();
12933         bnxt_hwrm_func_qcaps(bp);
12934
12935         if (netif_running(bp->dev))
12936                 __bnxt_close_nic(bp, true, false);
12937
12938         bnxt_ulp_irq_stop(bp);
12939         bnxt_clear_int_mode(bp);
12940         rc = bnxt_init_int_mode(bp);
12941         bnxt_ulp_irq_restart(bp, rc);
12942
12943         if (netif_running(bp->dev)) {
12944                 if (rc)
12945                         dev_close(bp->dev);
12946                 else
12947                         rc = bnxt_open_nic(bp, true, false);
12948         }
12949
12950         return rc;
12951 }
12952
12953 static int bnxt_init_mac_addr(struct bnxt *bp)
12954 {
12955         int rc = 0;
12956
12957         if (BNXT_PF(bp)) {
12958                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12959         } else {
12960 #ifdef CONFIG_BNXT_SRIOV
12961                 struct bnxt_vf_info *vf = &bp->vf;
12962                 bool strict_approval = true;
12963
12964                 if (is_valid_ether_addr(vf->mac_addr)) {
12965                         /* overwrite netdev dev_addr with admin VF MAC */
12966                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12967                         /* Older PF driver or firmware may not approve this
12968                          * correctly.
12969                          */
12970                         strict_approval = false;
12971                 } else {
12972                         eth_hw_addr_random(bp->dev);
12973                 }
12974                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12975 #endif
12976         }
12977         return rc;
12978 }
12979
12980 static void bnxt_vpd_read_info(struct bnxt *bp)
12981 {
12982         struct pci_dev *pdev = bp->pdev;
12983         unsigned int vpd_size, kw_len;
12984         int pos, size;
12985         u8 *vpd_data;
12986
12987         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
12988         if (IS_ERR(vpd_data)) {
12989                 pci_warn(pdev, "Unable to read VPD\n");
12990                 return;
12991         }
12992
12993         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
12994                                            PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
12995         if (pos < 0)
12996                 goto read_sn;
12997
12998         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
12999         memcpy(bp->board_partno, &vpd_data[pos], size);
13000
13001 read_sn:
13002         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13003                                            PCI_VPD_RO_KEYWORD_SERIALNO,
13004                                            &kw_len);
13005         if (pos < 0)
13006                 goto exit;
13007
13008         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13009         memcpy(bp->board_serialno, &vpd_data[pos], size);
13010 exit:
13011         kfree(vpd_data);
13012 }
13013
13014 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13015 {
13016         struct pci_dev *pdev = bp->pdev;
13017         u64 qword;
13018
13019         qword = pci_get_dsn(pdev);
13020         if (!qword) {
13021                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13022                 return -EOPNOTSUPP;
13023         }
13024
13025         put_unaligned_le64(qword, dsn);
13026
13027         bp->flags |= BNXT_FLAG_DSN_VALID;
13028         return 0;
13029 }
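
/* Illustrative sketch, not driver code: put_unaligned_le64() stores the
 * DSN least-significant byte first, so for a hypothetical DSN of
 * 0x1122334455667788:
 *
 *	u8 dsn[8];
 *	put_unaligned_le64(0x1122334455667788ULL, dsn);
 *	// dsn[] == { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }
 */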
13030
13031 static int bnxt_map_db_bar(struct bnxt *bp)
13032 {
13033         if (!bp->db_size)
13034                 return -ENODEV;
13035         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13036         if (!bp->bar1)
13037                 return -ENOMEM;
13038         return 0;
13039 }
13040
13041 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13042 {
13043         struct net_device *dev;
13044         struct bnxt *bp;
13045         int rc, max_irqs;
13046
13047         if (pci_is_bridge(pdev))
13048                 return -ENODEV;
13049
13050         /* Clear any pending DMA transactions left over from the crash kernel
13051          * while loading the driver in the capture kernel.
13052          */
13053         if (is_kdump_kernel()) {
13054                 pci_clear_master(pdev);
13055                 pcie_flr(pdev);
13056         }
13057
13058         max_irqs = bnxt_get_max_irq(pdev);
13059         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13060         if (!dev)
13061                 return -ENOMEM;
13062
13063         bp = netdev_priv(dev);
13064         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13065         bnxt_set_max_func_irqs(bp, max_irqs);
13066
13067         if (bnxt_vf_pciid(ent->driver_data))
13068                 bp->flags |= BNXT_FLAG_VF;
13069
13070         if (pdev->msix_cap)
13071                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13072
13073         rc = bnxt_init_board(pdev, dev);
13074         if (rc < 0)
13075                 goto init_err_free;
13076
13077         dev->netdev_ops = &bnxt_netdev_ops;
13078         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13079         dev->ethtool_ops = &bnxt_ethtool_ops;
13080         pci_set_drvdata(pdev, dev);
13081
13082         rc = bnxt_alloc_hwrm_resources(bp);
13083         if (rc)
13084                 goto init_err_pci_clean;
13085
13086         mutex_init(&bp->hwrm_cmd_lock);
13087         mutex_init(&bp->link_lock);
13088
13089         rc = bnxt_fw_init_one_p1(bp);
13090         if (rc)
13091                 goto init_err_pci_clean;
13092
13093         if (BNXT_PF(bp))
13094                 bnxt_vpd_read_info(bp);
13095
13096         if (BNXT_CHIP_P5(bp)) {
13097                 bp->flags |= BNXT_FLAG_CHIP_P5;
13098                 if (BNXT_CHIP_SR2(bp))
13099                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13100         }
13101
13102         rc = bnxt_alloc_rss_indir_tbl(bp);
13103         if (rc)
13104                 goto init_err_pci_clean;
13105
13106         rc = bnxt_fw_init_one_p2(bp);
13107         if (rc)
13108                 goto init_err_pci_clean;
13109
13110         rc = bnxt_map_db_bar(bp);
13111         if (rc) {
13112                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13113                         rc);
13114                 goto init_err_pci_clean;
13115         }
13116
13117         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13118                            NETIF_F_TSO | NETIF_F_TSO6 |
13119                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13120                            NETIF_F_GSO_IPXIP4 |
13121                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13122                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13123                            NETIF_F_RXCSUM | NETIF_F_GRO;
13124
13125         if (BNXT_SUPPORTS_TPA(bp))
13126                 dev->hw_features |= NETIF_F_LRO;
13127
13128         dev->hw_enc_features =
13129                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13130                         NETIF_F_TSO | NETIF_F_TSO6 |
13131                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13132                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13133                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13134         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13135
13136         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13137                                     NETIF_F_GSO_GRE_CSUM;
13138         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13139         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13140                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13141         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13142                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13143         if (BNXT_SUPPORTS_TPA(bp))
13144                 dev->hw_features |= NETIF_F_GRO_HW;
13145         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13146         if (dev->features & NETIF_F_GRO_HW)
13147                 dev->features &= ~NETIF_F_LRO;
13148         dev->priv_flags |= IFF_UNICAST_FLT;
13149
13150 #ifdef CONFIG_BNXT_SRIOV
13151         init_waitqueue_head(&bp->sriov_cfg_wait);
13152         mutex_init(&bp->sriov_lock);
13153 #endif
13154         if (BNXT_SUPPORTS_TPA(bp)) {
13155                 bp->gro_func = bnxt_gro_func_5730x;
13156                 if (BNXT_CHIP_P4(bp))
13157                         bp->gro_func = bnxt_gro_func_5731x;
13158                 else if (BNXT_CHIP_P5(bp))
13159                         bp->gro_func = bnxt_gro_func_5750x;
13160         }
13161         if (!BNXT_CHIP_P4_PLUS(bp))
13162                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13163
13164         rc = bnxt_init_mac_addr(bp);
13165         if (rc) {
13166                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13167                 rc = -EADDRNOTAVAIL;
13168                 goto init_err_pci_clean;
13169         }
13170
13171         if (BNXT_PF(bp)) {
13172                 /* Read the adapter's DSN to use as the eswitch switch_id */
13173                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13174         }
13175
13176         /* MTU range: 60 - FW-defined max */
13177         dev->min_mtu = ETH_ZLEN;
13178         dev->max_mtu = bp->max_mtu;
13179
13180         rc = bnxt_probe_phy(bp, true);
13181         if (rc)
13182                 goto init_err_pci_clean;
13183
13184         bnxt_set_rx_skb_mode(bp, false);
13185         bnxt_set_tpa_flags(bp);
13186         bnxt_set_ring_params(bp);
13187         rc = bnxt_set_dflt_rings(bp, true);
13188         if (rc) {
13189                 netdev_err(bp->dev, "Not enough rings available.\n");
13190                 rc = -ENOMEM;
13191                 goto init_err_pci_clean;
13192         }
13193
13194         bnxt_fw_init_one_p3(bp);
13195
13196         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13197                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13198
13199         rc = bnxt_init_int_mode(bp);
13200         if (rc)
13201                 goto init_err_pci_clean;
13202
13203         /* No TC has been set yet and rings may have been trimmed due to
13204          * limited MSIX, so we re-initialize the TX rings per TC.
13205          */
13206         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13207
13208         if (BNXT_PF(bp)) {
13209                 if (!bnxt_pf_wq) {
13210                         bnxt_pf_wq =
13211                                 create_singlethread_workqueue("bnxt_pf_wq");
13212                         if (!bnxt_pf_wq) {
13213                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13214                                 rc = -ENOMEM;
13215                                 goto init_err_pci_clean;
13216                         }
13217                 }
13218                 rc = bnxt_init_tc(bp);
13219                 if (rc)
13220                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13221                                    rc);
13222         }
13223
13224         if (bnxt_ptp_init(bp)) {
13225                 netdev_warn(dev, "PTP initialization failed.\n");
13226                 kfree(bp->ptp_cfg);
13227                 bp->ptp_cfg = NULL;
13228         }
13229         bnxt_inv_fw_health_reg(bp);
13230         bnxt_dl_register(bp);
13231
13232         rc = register_netdev(dev);
13233         if (rc)
13234                 goto init_err_cleanup;
13235
13236         if (BNXT_PF(bp))
13237                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13238         bnxt_dl_fw_reporters_create(bp);
13239
13240         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13241                     board_info[ent->driver_data].name,
13242                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
13243         pcie_print_link_status(pdev);
13244
13245         pci_save_state(pdev);
13246         return 0;
13247
13248 init_err_cleanup:
13249         bnxt_dl_unregister(bp);
13250         bnxt_shutdown_tc(bp);
13251         bnxt_clear_int_mode(bp);
13252
13253 init_err_pci_clean:
13254         bnxt_hwrm_func_drv_unrgtr(bp);
13255         bnxt_free_hwrm_short_cmd_req(bp);
13256         bnxt_free_hwrm_resources(bp);
13257         bnxt_ethtool_free(bp);
13258         kfree(bp->ptp_cfg);
13259         bp->ptp_cfg = NULL;
13260         kfree(bp->fw_health);
13261         bp->fw_health = NULL;
13262         bnxt_cleanup_pci(bp);
13263         bnxt_free_ctx_mem(bp);
13264         kfree(bp->ctx);
13265         bp->ctx = NULL;
13266         kfree(bp->rss_indir_tbl);
13267         bp->rss_indir_tbl = NULL;
13268
13269 init_err_free:
13270         free_netdev(dev);
13271         return rc;
13272 }
13273
13274 static void bnxt_shutdown(struct pci_dev *pdev)
13275 {
13276         struct net_device *dev = pci_get_drvdata(pdev);
13277         struct bnxt *bp;
13278
13279         if (!dev)
13280                 return;
13281
13282         rtnl_lock();
13283         bp = netdev_priv(dev);
13284         if (!bp)
13285                 goto shutdown_exit;
13286
13287         if (netif_running(dev))
13288                 dev_close(dev);
13289
13290         bnxt_ulp_shutdown(bp);
13291         bnxt_clear_int_mode(bp);
13292         pci_disable_device(pdev);
13293
13294         if (system_state == SYSTEM_POWER_OFF) {
13295                 pci_wake_from_d3(pdev, bp->wol);
13296                 pci_set_power_state(pdev, PCI_D3hot);
13297         }
13298
13299 shutdown_exit:
13300         rtnl_unlock();
13301 }
13302
13303 #ifdef CONFIG_PM_SLEEP
13304 static int bnxt_suspend(struct device *device)
13305 {
13306         struct net_device *dev = dev_get_drvdata(device);
13307         struct bnxt *bp = netdev_priv(dev);
13308         int rc = 0;
13309
13310         rtnl_lock();
13311         bnxt_ulp_stop(bp);
13312         if (netif_running(dev)) {
13313                 netif_device_detach(dev);
13314                 rc = bnxt_close(dev);
13315         }
13316         bnxt_hwrm_func_drv_unrgtr(bp);
13317         pci_disable_device(bp->pdev);
13318         bnxt_free_ctx_mem(bp);
13319         kfree(bp->ctx);
13320         bp->ctx = NULL;
13321         rtnl_unlock();
13322         return rc;
13323 }
13324
13325 static int bnxt_resume(struct device *device)
13326 {
13327         struct net_device *dev = dev_get_drvdata(device);
13328         struct bnxt *bp = netdev_priv(dev);
13329         int rc = 0;
13330
13331         rtnl_lock();
13332         rc = pci_enable_device(bp->pdev);
13333         if (rc) {
13334                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13335                            rc);
13336                 goto resume_exit;
13337         }
13338         pci_set_master(bp->pdev);
13339         if (bnxt_hwrm_ver_get(bp)) {
13340                 rc = -ENODEV;
13341                 goto resume_exit;
13342         }
13343         rc = bnxt_hwrm_func_reset(bp);
13344         if (rc) {
13345                 rc = -EBUSY;
13346                 goto resume_exit;
13347         }
13348
13349         rc = bnxt_hwrm_func_qcaps(bp);
13350         if (rc)
13351                 goto resume_exit;
13352
13353         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13354                 rc = -ENODEV;
13355                 goto resume_exit;
13356         }
13357
13358         bnxt_get_wol_settings(bp);
13359         if (netif_running(dev)) {
13360                 rc = bnxt_open(dev);
13361                 if (!rc)
13362                         netif_device_attach(dev);
13363         }
13364
13365 resume_exit:
13366         bnxt_ulp_start(bp, rc);
13367         if (!rc)
13368                 bnxt_reenable_sriov(bp);
13369         rtnl_unlock();
13370         return rc;
13371 }
13372
13373 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13374 #define BNXT_PM_OPS (&bnxt_pm_ops)
13375
13376 #else
13377
13378 #define BNXT_PM_OPS NULL
13379
13380 #endif /* CONFIG_PM_SLEEP */
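
/* Illustrative sketch, not driver code: SIMPLE_DEV_PM_OPS() above
 * expands to roughly the following, wiring both callbacks to every
 * system-sleep transition:
 *
 *	static const struct dev_pm_ops bnxt_pm_ops = {
 *		.suspend  = bnxt_suspend,	.resume	 = bnxt_resume,
 *		.freeze	  = bnxt_suspend,	.thaw	 = bnxt_resume,
 *		.poweroff = bnxt_suspend,	.restore = bnxt_resume,
 *	};
 */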
13381
13382 /**
13383  * bnxt_io_error_detected - called when PCI error is detected
13384  * @pdev: Pointer to PCI device
13385  * @state: The current pci connection state
13386  *
13387  * This function is called after a PCI bus error affecting
13388  * this device has been detected.
13389  */
13390 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13391                                                pci_channel_state_t state)
13392 {
13393         struct net_device *netdev = pci_get_drvdata(pdev);
13394         struct bnxt *bp = netdev_priv(netdev);
13395
13396         netdev_info(netdev, "PCI I/O error detected\n");
13397
13398         rtnl_lock();
13399         netif_device_detach(netdev);
13400
13401         bnxt_ulp_stop(bp);
13402
13403         if (state == pci_channel_io_perm_failure) {
13404                 rtnl_unlock();
13405                 return PCI_ERS_RESULT_DISCONNECT;
13406         }
13407
13408         if (state == pci_channel_io_frozen)
13409                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13410
13411         if (netif_running(netdev))
13412                 bnxt_close(netdev);
13413
13414         pci_disable_device(pdev);
13415         bnxt_free_ctx_mem(bp);
13416         kfree(bp->ctx);
13417         bp->ctx = NULL;
13418         rtnl_unlock();
13419
13420         /* Request a slot reset. */
13421         return PCI_ERS_RESULT_NEED_RESET;
13422 }
13423
13424 /**
13425  * bnxt_io_slot_reset - called after the pci bus has been reset.
13426  * @pdev: Pointer to PCI device
13427  *
13428  * Restart the card from scratch, as if from a cold-boot.
13429  * At this point, the card has experienced a hard reset,
13430  * followed by fixups by BIOS, and has its config space
13431  * set up identically to what it was at cold boot.
13432  */
13433 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13434 {
13435         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13436         struct net_device *netdev = pci_get_drvdata(pdev);
13437         struct bnxt *bp = netdev_priv(netdev);
13438         int err = 0, off;
13439
13440         netdev_info(bp->dev, "PCI Slot Reset\n");
13441
13442         rtnl_lock();
13443
13444         if (pci_enable_device(pdev)) {
13445                 dev_err(&pdev->dev,
13446                         "Cannot re-enable PCI device after reset.\n");
13447         } else {
13448                 pci_set_master(pdev);
13449                 /* Upon fatal error, the device's internal logic that latches
13450                  * the BAR values is reset and is restored only when the BARs
13451                  * are rewritten.
13452                  *
13453                  * Since pci_restore_state() does not rewrite a BAR whose value
13454                  * matches the previously saved value, the driver writes the
13455                  * BARs to 0 to force a restore after a fatal error.
13456                  */
13457                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13458                                        &bp->state)) {
13459                         for (off = PCI_BASE_ADDRESS_0;
13460                              off <= PCI_BASE_ADDRESS_5; off += 4)
13461                                 pci_write_config_dword(bp->pdev, off, 0);
13462                 }
13463                 pci_restore_state(pdev);
13464                 pci_save_state(pdev);
13465
13466                 err = bnxt_hwrm_func_reset(bp);
13467                 if (!err)
13468                         result = PCI_ERS_RESULT_RECOVERED;
13469         }
13470
13471         rtnl_unlock();
13472
13473         return result;
13474 }
13475
13476 /**
13477  * bnxt_io_resume - called when traffic can start flowing again.
13478  * @pdev: Pointer to PCI device
13479  *
13480  * This callback is called when the error recovery driver tells
13481  * us that it's OK to resume normal operation.
13482  */
13483 static void bnxt_io_resume(struct pci_dev *pdev)
13484 {
13485         struct net_device *netdev = pci_get_drvdata(pdev);
13486         struct bnxt *bp = netdev_priv(netdev);
13487         int err;
13488
13489         netdev_info(bp->dev, "PCI Slot Resume\n");
13490         rtnl_lock();
13491
13492         err = bnxt_hwrm_func_qcaps(bp);
13493         if (!err && netif_running(netdev))
13494                 err = bnxt_open(netdev);
13495
13496         bnxt_ulp_start(bp, err);
13497         if (!err) {
13498                 bnxt_reenable_sriov(bp);
13499                 netif_device_attach(netdev);
13500         }
13501
13502         rtnl_unlock();
13503 }
13504
13505 static const struct pci_error_handlers bnxt_err_handler = {
13506         .error_detected = bnxt_io_error_detected,
13507         .slot_reset     = bnxt_io_slot_reset,
13508         .resume         = bnxt_io_resume
13509 };
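
/* Illustrative note, not driver code: on a recoverable AER event the
 * PCI core walks these callbacks in order -- error_detected() (which
 * returns NEED_RESET above), then slot_reset() after the link reset,
 * then resume() once every driver on the segment reports RECOVERED.
 */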
13510
13511 static struct pci_driver bnxt_pci_driver = {
13512         .name           = DRV_MODULE_NAME,
13513         .id_table       = bnxt_pci_tbl,
13514         .probe          = bnxt_init_one,
13515         .remove         = bnxt_remove_one,
13516         .shutdown       = bnxt_shutdown,
13517         .driver.pm      = BNXT_PM_OPS,
13518         .err_handler    = &bnxt_err_handler,
13519 #if defined(CONFIG_BNXT_SRIOV)
13520         .sriov_configure = bnxt_sriov_configure,
13521 #endif
13522 };
13523
13524 static int __init bnxt_init(void)
13525 {
13526         bnxt_debug_init();
13527         return pci_register_driver(&bnxt_pci_driver);
13528 }
13529
13530 static void __exit bnxt_exit(void)
13531 {
13532         pci_unregister_driver(&bnxt_pci_driver);
13533         if (bnxt_pf_wq)
13534                 destroy_workqueue(bnxt_pf_wq);
13535         bnxt_debug_exit();
13536 }
13537
13538 module_init(bnxt_init);
13539 module_exit(bnxt_exit);