/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

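/* Doorbell helpers: legacy chips take 32-bit doorbells built from the
 * DB_* key/flag bits, while P5 chips take 64-bit doorbells keyed by
 * db_key64 and a DBR_TYPE_* code.  The *_ARM variants also re-arm the
 * ring's interrupt in the same write.
 */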
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

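/* TX length hint lookup table.  bnxt_start_xmit() indexes this array with
 * the packet length in 512-byte units (length >> 9) to give the chip a
 * size hint in the BD flags.
 */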
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

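/* The METADATA_HW_PORT_MUX dst is attached by the VF representor transmit
 * path; its port_id carries the CFA action that tells the hardware which
 * function the packet should be switched to.
 */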
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb->no_fcs)) {
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
		goto normal_tx;
	}

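	/* Small packets on an empty ring can be pushed (BDs plus data
	 * written directly through the doorbell BAR) to cut latency;
	 * everything else takes the normal DMA path below.
	 */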
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		/* push_len is in 8-byte units; longer pushes fall back to
		 * 32-bit copies after the first 16 quadwords.
		 */
		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

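/* Reclaim up to nr_pkts completed TX packets: unmap their buffers, free
 * the skbs, and wake the TX queue if it was stopped and enough
 * descriptors have been freed.
 */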
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

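/* Recycle a consumed RX buffer by installing it at the current producer
 * index, used when the packet was copied out or a replacement buffer
 * could not be allocated.
 */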
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

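/* Build an skb around a page-mode RX buffer: pull the protocol headers
 * into the linear area and leave the rest of the payload in the page
 * fragment.
 */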
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

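/* Attach the aggregation-ring pages of a jumbo or TPA packet to the skb
 * as page fragments, replenishing each consumed aggregation slot as we
 * go.
 */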
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

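/* Copy a small packet into a fresh skb so the original DMA buffer can
 * stay on the ring; the buffer is only synced, never unmapped.
 */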
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

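/* On P5 chips the hardware aggregation ID space is larger than the number
 * of TPA slots, so agg IDs are remapped through a small bitmap-backed
 * table.  The helpers below allocate, free, and look up those mappings.
 */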
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

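/* For a UDP-encapsulated GRO packet, pick the tunnel GSO type from the
 * outer UDP checksum: a non-zero checksum requires
 * SKB_GSO_UDP_TUNNEL_CSUM.
 */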
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);

	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);

	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (gro)
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnxt_tpa_info *tpa_info;

	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];
	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 misc;
	void *data;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);

		/* 0xffff is forced error, don't print it */
		if (rxr->rx_next_cons != 0xffff)
			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
				    cons, rxr->rx_next_cons);
		bnxt_sched_reset(bp, rxr);
		if (rc1)
			return rc1;
		goto next_rx_no_prod_no_len;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);

		rc = -EIO;
		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
				netdev_warn_once(bp->dev, "RX buffer error %x\n",
						 rx_err);
				bnxt_sched_reset(bp, rxr);
			}
		}
		goto next_rx_no_len;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			if (agg_bufs)
				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
						       agg_bufs, false);
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_len:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}

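/* Read one of the firmware health status registers.  The register
 * descriptor encodes both the access method (config space, GRC window,
 * or a BAR offset) and the offset within that space.
 */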
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl(bp->bar0 + reg_off);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl(bp->bar1 + reg_off);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}

static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
{
	int i;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		u16 grp_idx = bp->rx_ring[i].bnapi->index;
		struct bnxt_ring_grp_info *grp_info;

		grp_info = &bp->grp_info[grp_idx];
		if (grp_info->agg_fw_ring_id == ring_id)
			return grp_idx;
	}
	return INVALID_HW_RING_ID;
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

#define BNXT_EVENT_RING_TYPE(data2)	\
	((data2) &			\
	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)

#define BNXT_EVENT_RING_TYPE_RX(data2)	\
	(BNXT_EVENT_RING_TYPE(data2) ==	\
	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)

1996 static int bnxt_async_event_process(struct bnxt *bp,
1997 struct hwrm_async_event_cmpl *cmpl)
1999 u16 event_id = le16_to_cpu(cmpl->event_id);
2000 u32 data1 = le32_to_cpu(cmpl->event_data1);
2001 u32 data2 = le32_to_cpu(cmpl->event_data2);
2003 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2005 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2006 struct bnxt_link_info *link_info = &bp->link_info;
2009 goto async_event_process_exit;
2011 /* print unsupported speed warning in forced speed mode only */
2012 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2013 (data1 & 0x20000)) {
2014 u16 fw_speed = link_info->force_link_speed;
2015 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2017 if (speed != SPEED_UNKNOWN)
2018 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2021 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2024 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2025 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2026 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2028 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2029 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2031 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2032 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2034 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2035 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2040 if (bp->pf.port_id != port_id)
2043 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2046 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2048 goto async_event_process_exit;
2049 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2051 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2052 char *fatal_str = "non-fatal";
2055 goto async_event_process_exit;
2057 bp->fw_reset_timestamp = jiffies;
2058 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2059 if (!bp->fw_reset_min_dsecs)
2060 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2061 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2062 if (!bp->fw_reset_max_dsecs)
2063 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2064 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2065 fatal_str = "fatal";
2066 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2068 netif_warn(bp, hw, bp->dev,
2069 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2070 fatal_str, data1, data2,
2071 bp->fw_reset_min_dsecs * 100,
2072 bp->fw_reset_max_dsecs * 100);
2073 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2076 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2077 struct bnxt_fw_health *fw_health = bp->fw_health;
2080 goto async_event_process_exit;
2082 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2083 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2084 if (!fw_health->enabled) {
2085 netif_info(bp, drv, bp->dev,
2086 "Error recovery info: error recovery[0]\n");
2089 fw_health->tmr_multiplier =
2090 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2091 bp->current_interval * 10);
2092 fw_health->tmr_counter = fw_health->tmr_multiplier;
2093 fw_health->last_fw_heartbeat =
2094 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2095 fw_health->last_fw_reset_cnt =
2096 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2097 netif_info(bp, drv, bp->dev,
2098 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2099 fw_health->master, fw_health->last_fw_reset_cnt,
2100 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2101 goto async_event_process_exit;
2102 }
2103 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2104 netif_notice(bp, hw, bp->dev,
2105 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2107 goto async_event_process_exit;
2108 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2109 struct bnxt_rx_ring_info *rxr;
2110 u16 grp_idx;
2112 if (bp->flags & BNXT_FLAG_CHIP_P5)
2113 goto async_event_process_exit;
2115 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2116 BNXT_EVENT_RING_TYPE(data2), data1);
2117 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2118 goto async_event_process_exit;
2120 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2121 if (grp_idx == INVALID_HW_RING_ID) {
2122 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2124 goto async_event_process_exit;
2126 rxr = bp->bnapi[grp_idx]->rx_ring;
2127 bnxt_sched_reset(bp, rxr);
2128 goto async_event_process_exit;
2129 }
2130 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2131 struct bnxt_fw_health *fw_health = bp->fw_health;
2133 netif_notice(bp, hw, bp->dev,
2134 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2137 fw_health->echo_req_data1 = data1;
2138 fw_health->echo_req_data2 = data2;
2139 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2140 break;
2141 }
2142 goto async_event_process_exit;
2143 }
2144 default:
2145 goto async_event_process_exit;
2146 }
2147 bnxt_queue_sp_work(bp);
2148 async_event_process_exit:
2149 bnxt_ulp_async_events(bp, cmpl);
2150 return 0;
2151 }
2153 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2155 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2156 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2157 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2158 (struct hwrm_fwd_req_cmpl *)txcmp;
2160 switch (cmpl_type) {
2161 case CMPL_BASE_TYPE_HWRM_DONE:
2162 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2163 if (seq_id == bp->hwrm_intr_seq_id)
2164 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2166 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2169 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2170 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2172 if ((vf_id < bp->pf.first_vf_id) ||
2173 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2174 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2179 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2180 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2181 bnxt_queue_sp_work(bp);
2184 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2185 bnxt_async_event_process(bp,
2186 (struct hwrm_async_event_cmpl *)txcmp);
2187 break;
2189 default:
2190 break;
2191 }
2193 return 0;
2194 }
2195 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2197 struct bnxt_napi *bnapi = dev_instance;
2198 struct bnxt *bp = bnapi->bp;
2199 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2200 u32 cons = RING_CMP(cpr->cp_raw_cons);
2202 cpr->event_ctr++;
2203 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2204 napi_schedule(&bnapi->napi);
2205 return IRQ_HANDLED;
2206 }
2208 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2210 u32 raw_cons = cpr->cp_raw_cons;
2211 u16 cons = RING_CMP(raw_cons);
2212 struct tx_cmp *txcmp;
2214 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2216 return TX_CMP_VALID(txcmp, raw_cons);
2217 }
2219 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2221 struct bnxt_napi *bnapi = dev_instance;
2222 struct bnxt *bp = bnapi->bp;
2223 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2224 u32 cons = RING_CMP(cpr->cp_raw_cons);
2227 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2229 if (!bnxt_has_work(bp, cpr)) {
2230 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2231 /* return if erroneous interrupt */
2232 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2233 return IRQ_NONE;
2234 }
2236 /* disable ring IRQ */
2237 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2239 /* Return here if interrupt is shared and is disabled. */
2240 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2241 return IRQ_HANDLED;
2243 napi_schedule(&bnapi->napi);
2244 return IRQ_HANDLED;
2245 }
2247 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2250 struct bnxt_napi *bnapi = cpr->bnapi;
2251 u32 raw_cons = cpr->cp_raw_cons;
2256 struct tx_cmp *txcmp;
2258 cpr->has_more_work = 0;
2259 cpr->had_work_done = 1;
2263 cons = RING_CMP(raw_cons);
2264 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2266 if (!TX_CMP_VALID(txcmp, raw_cons))
2269 /* The valid test of the entry must be done first before
2270 * reading any further.
2271 */
2272 dma_rmb();
2273 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2274 tx_pkts++;
2275 /* return full budget so NAPI will complete. */
2276 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2277 rx_pkts = budget;
2278 raw_cons = NEXT_RAW_CMP(raw_cons);
2279 if (budget)
2280 cpr->has_more_work = 1;
2281 break;
2282 }
2283 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2284 if (likely(budget))
2285 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2286 else
2287 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2288 &event);
2289 if (likely(rc >= 0))
2290 rx_pkts += rc;
2291 /* Increment rx_pkts when rc is -ENOMEM to count towards
2292 * the NAPI budget. Otherwise, we may potentially loop
2293 * here forever if we consistently cannot allocate
2294 * buffers.
2295 */
2296 else if (rc == -ENOMEM && budget)
2297 rx_pkts++;
2298 else if (rc == -EBUSY) /* partial completion */
2299 break;
2300 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2301 CMPL_BASE_TYPE_HWRM_DONE) ||
2302 (TX_CMP_TYPE(txcmp) ==
2303 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2304 (TX_CMP_TYPE(txcmp) ==
2305 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2306 bnxt_hwrm_handler(bp, txcmp);
2308 raw_cons = NEXT_RAW_CMP(raw_cons);
2310 if (rx_pkts && rx_pkts == budget) {
2311 cpr->has_more_work = 1;
2312 break;
2313 }
2314 }
2316 if (event & BNXT_REDIRECT_EVENT)
2317 xdp_do_flush_map();
2319 if (event & BNXT_TX_EVENT) {
2320 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2321 u16 prod = txr->tx_prod;
2323 /* Sync BD data before updating doorbell */
2324 wmb();
2326 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2327 }
2329 cpr->cp_raw_cons = raw_cons;
2330 bnapi->tx_pkts += tx_pkts;
2331 bnapi->events |= event;
2332 return rx_pkts;
2333 }
2335 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2337 if (bnapi->tx_pkts) {
2338 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2339 bnapi->tx_pkts = 0;
2340 }
2342 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2343 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2345 if (bnapi->events & BNXT_AGG_EVENT)
2346 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2347 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2348 }
2349 bnapi->events = 0;
2350 }
2352 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2355 struct bnxt_napi *bnapi = cpr->bnapi;
2358 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2360 /* ACK completion ring before freeing tx ring and producing new
2361 * buffers in rx/agg rings to prevent overflowing the completion
2362 * ring.
2363 */
2364 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2366 __bnxt_poll_work_done(bp, bnapi);
2367 return rx_pkts;
2368 }
2370 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2372 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2373 struct bnxt *bp = bnapi->bp;
2374 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2375 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2376 struct tx_cmp *txcmp;
2377 struct rx_cmp_ext *rxcmp1;
2378 u32 cp_cons, tmp_raw_cons;
2379 u32 raw_cons = cpr->cp_raw_cons;
2380 u32 rx_pkts = 0;
2381 u8 event = 0;
2383 while (1) {
2384 int rc;
2386 cp_cons = RING_CMP(raw_cons);
2387 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2389 if (!TX_CMP_VALID(txcmp, raw_cons))
2392 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2393 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2394 cp_cons = RING_CMP(tmp_raw_cons);
2395 rxcmp1 = (struct rx_cmp_ext *)
2396 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2398 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2399 break;
2401 /* force an error to recycle the buffer */
2402 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2403 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2405 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2406 if (likely(rc == -EIO) && budget)
2407 rx_pkts++;
2408 else if (rc == -EBUSY) /* partial completion */
2409 break;
2410 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2411 CMPL_BASE_TYPE_HWRM_DONE)) {
2412 bnxt_hwrm_handler(bp, txcmp);
2413 } else {
2414 netdev_err(bp->dev,
2415 "Invalid completion received on special ring\n");
2416 }
2417 raw_cons = NEXT_RAW_CMP(raw_cons);
2419 if (rx_pkts == budget)
2420 break;
2421 }
2423 cpr->cp_raw_cons = raw_cons;
2424 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2425 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2427 if (event & BNXT_AGG_EVENT)
2428 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2430 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2431 napi_complete_done(napi, rx_pkts);
2432 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2433 }
2435 return rx_pkts;
2436 }
2437 static int bnxt_poll(struct napi_struct *napi, int budget)
2439 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2440 struct bnxt *bp = bnapi->bp;
2441 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2442 int work_done = 0;
2444 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2445 napi_complete(napi);
2446 return 0;
2447 }
2448 while (1) {
2449 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2451 if (work_done >= budget) {
2452 if (!budget)
2453 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2454 break;
2455 }
2457 if (!bnxt_has_work(bp, cpr)) {
2458 if (napi_complete_done(napi, work_done))
2459 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2460 break;
2461 }
2462 }
2463 if (bp->flags & BNXT_FLAG_DIM) {
2464 struct dim_sample dim_sample = {};
2466 dim_update_sample(cpr->event_ctr,
2467 cpr->rx_packets,
2468 cpr->rx_bytes,
2469 &dim_sample);
2470 net_dim(&cpr->dim, dim_sample);
2471 }
2472 return work_done;
2473 }
2475 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2477 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2478 int i, work_done = 0;
2480 for (i = 0; i < 2; i++) {
2481 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2483 if (cpr2) {
2484 work_done += __bnxt_poll_work(bp, cpr2,
2485 budget - work_done);
2486 cpr->has_more_work |= cpr2->has_more_work;
2487 }
2488 }
2489 return work_done;
2490 }
2492 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2493 u64 dbr_type)
2495 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2498 for (i = 0; i < 2; i++) {
2499 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2500 struct bnxt_db_info *db;
2502 if (cpr2 && cpr2->had_work_done) {
2503 db = &cpr2->cp_db;
2504 writeq(db->db_key64 | dbr_type |
2505 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2506 cpr2->had_work_done = 0;
2507 }
2508 }
2509 __bnxt_poll_work_done(bp, bnapi);
2510 }
2512 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2514 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2515 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2516 u32 raw_cons = cpr->cp_raw_cons;
2517 struct bnxt *bp = bnapi->bp;
2518 struct nqe_cn *nqcmp;
2519 int work_done = 0;
2520 u32 cons;
2522 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2523 napi_complete(napi);
2524 return 0;
2525 }
2526 if (cpr->has_more_work) {
2527 cpr->has_more_work = 0;
2528 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2529 }
2530 while (1) {
2531 cons = RING_CMP(raw_cons);
2532 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2534 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2535 if (cpr->has_more_work)
2536 break;
2538 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2539 cpr->cp_raw_cons = raw_cons;
2540 if (napi_complete_done(napi, work_done))
2541 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2542 cpr->cp_raw_cons);
2543 break;
2544 }
2546 /* The valid test of the entry must be done first before
2547 * reading any further.
2548 */
2549 dma_rmb();
2551 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2552 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2553 struct bnxt_cp_ring_info *cpr2;
2555 cpr2 = cpr->cp_ring_arr[idx];
2556 work_done += __bnxt_poll_work(bp, cpr2,
2557 budget - work_done);
2558 cpr->has_more_work |= cpr2->has_more_work;
2559 } else {
2560 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2561 }
2562 raw_cons = NEXT_RAW_CMP(raw_cons);
2563 }
2564 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2565 if (raw_cons != cpr->cp_raw_cons) {
2566 cpr->cp_raw_cons = raw_cons;
2567 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2568 }
2569 return work_done;
2570 }
2572 static void bnxt_free_tx_skbs(struct bnxt *bp)
2575 struct pci_dev *pdev = bp->pdev;
2580 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2581 for (i = 0; i < bp->tx_nr_rings; i++) {
2582 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2585 for (j = 0; j < max_idx;) {
2586 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2587 struct sk_buff *skb;
2590 if (i < bp->tx_nr_rings_xdp &&
2591 tx_buf->action == XDP_REDIRECT) {
2592 dma_unmap_single(&pdev->dev,
2593 dma_unmap_addr(tx_buf, mapping),
2594 dma_unmap_len(tx_buf, len),
2595 PCI_DMA_TODEVICE);
2596 xdp_return_frame(tx_buf->xdpf);
2598 tx_buf->xdpf = NULL;
2611 if (tx_buf->is_push) {
2617 dma_unmap_single(&pdev->dev,
2618 dma_unmap_addr(tx_buf, mapping),
2622 last = tx_buf->nr_frags;
2624 for (k = 0; k < last; k++, j++) {
2625 int ring_idx = j & bp->tx_ring_mask;
2626 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2628 tx_buf = &txr->tx_buf_ring[ring_idx];
2631 dma_unmap_addr(tx_buf, mapping),
2632 skb_frag_size(frag), PCI_DMA_TODEVICE);
2633 }
2634 dev_kfree_skb(skb);
2635 }
2636 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2637 }
2638 }
2640 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2642 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2643 struct pci_dev *pdev = bp->pdev;
2644 struct bnxt_tpa_idx_map *map;
2645 int i, max_idx, max_agg_idx;
2647 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2648 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2649 if (!rxr->rx_tpa)
2650 goto skip_rx_tpa_free;
2652 for (i = 0; i < bp->max_tpa; i++) {
2653 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2654 u8 *data = tpa_info->data;
2656 if (!data)
2657 continue;
2659 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2660 bp->rx_buf_use_size, bp->rx_dir,
2661 DMA_ATTR_WEAK_ORDERING);
2663 tpa_info->data = NULL;
2665 kfree(data);
2666 }
2668 skip_rx_tpa_free:
2669 for (i = 0; i < max_idx; i++) {
2670 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2671 dma_addr_t mapping = rx_buf->mapping;
2672 void *data = rx_buf->data;
2674 if (!data)
2675 continue;
2677 rx_buf->data = NULL;
2678 if (BNXT_RX_PAGE_MODE(bp)) {
2679 mapping -= bp->rx_dma_offset;
2680 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2681 bp->rx_dir,
2682 DMA_ATTR_WEAK_ORDERING);
2683 page_pool_recycle_direct(rxr->page_pool, data);
2685 dma_unmap_single_attrs(&pdev->dev, mapping,
2686 bp->rx_buf_use_size, bp->rx_dir,
2687 DMA_ATTR_WEAK_ORDERING);
2688 kfree(data);
2689 }
2690 }
2691 for (i = 0; i < max_agg_idx; i++) {
2692 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2693 struct page *page = rx_agg_buf->page;
2695 if (!page)
2696 continue;
2698 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2699 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2700 DMA_ATTR_WEAK_ORDERING);
2702 rx_agg_buf->page = NULL;
2703 __clear_bit(i, rxr->rx_agg_bmap);
2705 __free_page(page);
2706 }
2707 if (rxr->rx_page) {
2708 __free_page(rxr->rx_page);
2709 rxr->rx_page = NULL;
2710 }
2711 map = rxr->rx_tpa_idx_map;
2712 if (map)
2713 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2714 }
2716 static void bnxt_free_rx_skbs(struct bnxt *bp)
2723 for (i = 0; i < bp->rx_nr_rings; i++)
2724 bnxt_free_one_rx_ring_skbs(bp, i);
2727 static void bnxt_free_skbs(struct bnxt *bp)
2729 bnxt_free_tx_skbs(bp);
2730 bnxt_free_rx_skbs(bp);
2731 }
2733 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2735 u8 init_val = mem_init->init_val;
2736 u16 offset = mem_init->offset;
2737 u8 *p2 = p;
2738 int i;
2740 if (!init_val)
2741 return;
2742 if (offset == BNXT_MEM_INVALID_OFFSET) {
2743 memset(p, init_val, len);
2744 return;
2745 }
2746 for (i = 0; i < len; i += mem_init->size)
2747 *(p2 + i + offset) = init_val;
2748 }
2750 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2752 struct pci_dev *pdev = bp->pdev;
2755 for (i = 0; i < rmem->nr_pages; i++) {
2756 if (!rmem->pg_arr[i])
2757 continue;
2759 dma_free_coherent(&pdev->dev, rmem->page_size,
2760 rmem->pg_arr[i], rmem->dma_arr[i]);
2762 rmem->pg_arr[i] = NULL;
2763 }
2764 if (rmem->pg_tbl) {
2765 size_t pg_tbl_size = rmem->nr_pages * 8;
2767 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2768 pg_tbl_size = rmem->page_size;
2769 dma_free_coherent(&pdev->dev, pg_tbl_size,
2770 rmem->pg_tbl, rmem->pg_tbl_map);
2771 rmem->pg_tbl = NULL;
2772 }
2773 if (rmem->vmem_size && *rmem->vmem) {
2774 vfree(*rmem->vmem);
2775 *rmem->vmem = NULL;
2776 }
2777 }
2779 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2781 struct pci_dev *pdev = bp->pdev;
2782 u64 valid_bit = 0;
2783 int i;
2785 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2786 valid_bit = PTU_PTE_VALID;
2787 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2788 size_t pg_tbl_size = rmem->nr_pages * 8;
2790 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2791 pg_tbl_size = rmem->page_size;
2792 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2793 &rmem->pg_tbl_map,
2794 GFP_KERNEL);
2795 if (!rmem->pg_tbl)
2796 return -ENOMEM;
2797 }
2799 for (i = 0; i < rmem->nr_pages; i++) {
2800 u64 extra_bits = valid_bit;
2802 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2803 rmem->page_size,
2804 &rmem->dma_arr[i],
2805 GFP_KERNEL);
2806 if (!rmem->pg_arr[i])
2807 return -ENOMEM;
2809 if (rmem->mem_init)
2810 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2811 rmem->page_size);
2812 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2813 if (i == rmem->nr_pages - 2 &&
2814 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2815 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2816 else if (i == rmem->nr_pages - 1 &&
2817 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2818 extra_bits |= PTU_PTE_LAST;
2819 rmem->pg_tbl[i] =
2820 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2821 }
2822 }
2824 if (rmem->vmem_size) {
2825 *rmem->vmem = vzalloc(rmem->vmem_size);
2826 if (!(*rmem->vmem))
2827 return -ENOMEM;
2828 }
2829 return 0;
2830 }
2832 static void bnxt_free_tpa_info(struct bnxt *bp)
2836 for (i = 0; i < bp->rx_nr_rings; i++) {
2837 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2839 kfree(rxr->rx_tpa_idx_map);
2840 rxr->rx_tpa_idx_map = NULL;
2841 if (rxr->rx_tpa) {
2842 kfree(rxr->rx_tpa[0].agg_arr);
2843 rxr->rx_tpa[0].agg_arr = NULL;
2844 }
2845 kfree(rxr->rx_tpa);
2846 rxr->rx_tpa = NULL;
2847 }
2848 }
2850 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2852 int i, j, total_aggs = 0;
2854 bp->max_tpa = MAX_TPA;
2855 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2856 if (!bp->max_tpa_v2)
2857 return 0;
2858 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2859 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2862 for (i = 0; i < bp->rx_nr_rings; i++) {
2863 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2864 struct rx_agg_cmp *agg;
2866 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2867 GFP_KERNEL);
2868 if (!rxr->rx_tpa)
2869 return -ENOMEM;
2871 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2872 continue;
2873 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2874 rxr->rx_tpa[0].agg_arr = agg;
2875 if (!agg)
2876 return -ENOMEM;
2877 for (j = 1; j < bp->max_tpa; j++)
2878 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2879 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2880 GFP_KERNEL);
2881 if (!rxr->rx_tpa_idx_map)
2882 return -ENOMEM;
2883 }
2884 return 0;
2885 }
2887 static void bnxt_free_rx_rings(struct bnxt *bp)
2889 int i;
2891 if (!bp->rx_ring)
2892 return;
2894 bnxt_free_tpa_info(bp);
2895 for (i = 0; i < bp->rx_nr_rings; i++) {
2896 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2897 struct bnxt_ring_struct *ring;
2899 if (rxr->xdp_prog)
2900 bpf_prog_put(rxr->xdp_prog);
2902 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2903 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2905 page_pool_destroy(rxr->page_pool);
2906 rxr->page_pool = NULL;
2908 kfree(rxr->rx_agg_bmap);
2909 rxr->rx_agg_bmap = NULL;
2911 ring = &rxr->rx_ring_struct;
2912 bnxt_free_ring(bp, &ring->ring_mem);
2914 ring = &rxr->rx_agg_ring_struct;
2915 bnxt_free_ring(bp, &ring->ring_mem);
2916 }
2917 }
2919 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2920 struct bnxt_rx_ring_info *rxr)
2922 struct page_pool_params pp = { 0 };
2924 pp.pool_size = bp->rx_ring_size;
2925 pp.nid = dev_to_node(&bp->pdev->dev);
2926 pp.dev = &bp->pdev->dev;
2927 pp.dma_dir = DMA_BIDIRECTIONAL;
2929 rxr->page_pool = page_pool_create(&pp);
2930 if (IS_ERR(rxr->page_pool)) {
2931 int err = PTR_ERR(rxr->page_pool);
2933 rxr->page_pool = NULL;
2934 return err;
2935 }
2936 return 0;
2937 }
2939 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2941 int i, rc = 0, agg_rings = 0;
2943 if (!bp->rx_ring)
2944 return -ENOMEM;
2946 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2947 agg_rings = 1;
2949 for (i = 0; i < bp->rx_nr_rings; i++) {
2950 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2951 struct bnxt_ring_struct *ring;
2953 ring = &rxr->rx_ring_struct;
2955 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2956 if (rc)
2957 return rc;
2959 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
2960 if (rc < 0)
2961 return rc;
2963 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2964 MEM_TYPE_PAGE_POOL,
2965 rxr->page_pool);
2966 if (rc) {
2967 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2968 return rc;
2969 }
2971 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2979 ring = &rxr->rx_agg_ring_struct;
2980 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2985 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2986 mem_size = rxr->rx_agg_bmap_size / 8;
2987 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2988 if (!rxr->rx_agg_bmap)
2989 return -ENOMEM;
2990 }
2991 }
2992 if (bp->flags & BNXT_FLAG_TPA)
2993 rc = bnxt_alloc_tpa_info(bp);
2994 return rc;
2995 }
2997 static void bnxt_free_tx_rings(struct bnxt *bp)
3000 struct pci_dev *pdev = bp->pdev;
3005 for (i = 0; i < bp->tx_nr_rings; i++) {
3006 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3007 struct bnxt_ring_struct *ring;
3010 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3011 txr->tx_push, txr->tx_push_mapping);
3012 txr->tx_push = NULL;
3015 ring = &txr->tx_ring_struct;
3017 bnxt_free_ring(bp, &ring->ring_mem);
3018 }
3019 }
3021 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3024 struct pci_dev *pdev = bp->pdev;
3026 bp->tx_push_size = 0;
3027 if (bp->tx_push_thresh) {
3030 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3031 bp->tx_push_thresh);
3033 if (push_size > 256) {
3035 bp->tx_push_thresh = 0;
3038 bp->tx_push_size = push_size;
3041 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3042 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3043 struct bnxt_ring_struct *ring;
3046 ring = &txr->tx_ring_struct;
3048 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3049 if (rc)
3050 return rc;
3052 ring->grp_idx = txr->bnapi->index;
3053 if (bp->tx_push_size) {
3056 /* One pre-allocated DMA buffer to backup
3059 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3061 &txr->tx_push_mapping,
3067 mapping = txr->tx_push_mapping +
3068 sizeof(struct tx_push_bd);
3069 txr->data_mapping = cpu_to_le64(mapping);
3071 qidx = bp->tc_to_qidx[j];
3072 ring->queue_id = bp->q_info[qidx].queue_id;
3073 if (i < bp->tx_nr_rings_xdp)
3074 continue;
3075 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3076 j++;
3077 }
3078 return 0;
3079 }
3081 static void bnxt_free_cp_rings(struct bnxt *bp)
3088 for (i = 0; i < bp->cp_nr_rings; i++) {
3089 struct bnxt_napi *bnapi = bp->bnapi[i];
3090 struct bnxt_cp_ring_info *cpr;
3091 struct bnxt_ring_struct *ring;
3097 cpr = &bnapi->cp_ring;
3098 ring = &cpr->cp_ring_struct;
3100 bnxt_free_ring(bp, &ring->ring_mem);
3102 for (j = 0; j < 2; j++) {
3103 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3106 ring = &cpr2->cp_ring_struct;
3107 bnxt_free_ring(bp, &ring->ring_mem);
3109 cpr->cp_ring_arr[j] = NULL;
3115 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3117 struct bnxt_ring_mem_info *rmem;
3118 struct bnxt_ring_struct *ring;
3119 struct bnxt_cp_ring_info *cpr;
3122 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3126 ring = &cpr->cp_ring_struct;
3127 rmem = &ring->ring_mem;
3128 rmem->nr_pages = bp->cp_nr_pages;
3129 rmem->page_size = HW_CMPD_RING_SIZE;
3130 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3131 rmem->dma_arr = cpr->cp_desc_mapping;
3132 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3133 rc = bnxt_alloc_ring(bp, rmem);
3135 bnxt_free_ring(bp, rmem);
3142 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3144 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3145 int i, rc, ulp_base_vec, ulp_msix;
3147 ulp_msix = bnxt_get_ulp_msix_num(bp);
3148 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3149 for (i = 0; i < bp->cp_nr_rings; i++) {
3150 struct bnxt_napi *bnapi = bp->bnapi[i];
3151 struct bnxt_cp_ring_info *cpr;
3152 struct bnxt_ring_struct *ring;
3154 if (!bnapi)
3155 continue;
3157 cpr = &bnapi->cp_ring;
3158 cpr->bnapi = bnapi;
3159 ring = &cpr->cp_ring_struct;
3161 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3162 if (rc)
3163 return rc;
3165 if (ulp_msix && i >= ulp_base_vec)
3166 ring->map_idx = i + ulp_msix;
3167 else
3168 ring->map_idx = i;
3170 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3171 continue;
3173 if (i < bp->rx_nr_rings) {
3174 struct bnxt_cp_ring_info *cpr2 =
3175 bnxt_alloc_cp_sub_ring(bp);
3177 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3178 if (!cpr2)
3179 return -ENOMEM;
3180 cpr2->bnapi = bnapi;
3182 if ((sh && i < bp->tx_nr_rings) ||
3183 (!sh && i >= bp->rx_nr_rings)) {
3184 struct bnxt_cp_ring_info *cpr2 =
3185 bnxt_alloc_cp_sub_ring(bp);
3187 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3188 if (!cpr2)
3189 return -ENOMEM;
3190 cpr2->bnapi = bnapi;
3191 }
3192 }
3193 return 0;
3194 }
3196 static void bnxt_init_ring_struct(struct bnxt *bp)
3200 for (i = 0; i < bp->cp_nr_rings; i++) {
3201 struct bnxt_napi *bnapi = bp->bnapi[i];
3202 struct bnxt_ring_mem_info *rmem;
3203 struct bnxt_cp_ring_info *cpr;
3204 struct bnxt_rx_ring_info *rxr;
3205 struct bnxt_tx_ring_info *txr;
3206 struct bnxt_ring_struct *ring;
3211 cpr = &bnapi->cp_ring;
3212 ring = &cpr->cp_ring_struct;
3213 rmem = &ring->ring_mem;
3214 rmem->nr_pages = bp->cp_nr_pages;
3215 rmem->page_size = HW_CMPD_RING_SIZE;
3216 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3217 rmem->dma_arr = cpr->cp_desc_mapping;
3218 rmem->vmem_size = 0;
3220 rxr = bnapi->rx_ring;
3221 if (!rxr)
3222 goto skip_rx;
3224 ring = &rxr->rx_ring_struct;
3225 rmem = &ring->ring_mem;
3226 rmem->nr_pages = bp->rx_nr_pages;
3227 rmem->page_size = HW_RXBD_RING_SIZE;
3228 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3229 rmem->dma_arr = rxr->rx_desc_mapping;
3230 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3231 rmem->vmem = (void **)&rxr->rx_buf_ring;
3233 ring = &rxr->rx_agg_ring_struct;
3234 rmem = &ring->ring_mem;
3235 rmem->nr_pages = bp->rx_agg_nr_pages;
3236 rmem->page_size = HW_RXBD_RING_SIZE;
3237 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3238 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3239 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3240 rmem->vmem = (void **)&rxr->rx_agg_ring;
3242 skip_rx:
3243 txr = bnapi->tx_ring;
3244 if (!txr)
3245 continue;
3247 ring = &txr->tx_ring_struct;
3248 rmem = &ring->ring_mem;
3249 rmem->nr_pages = bp->tx_nr_pages;
3250 rmem->page_size = HW_RXBD_RING_SIZE;
3251 rmem->pg_arr = (void **)txr->tx_desc_ring;
3252 rmem->dma_arr = txr->tx_desc_mapping;
3253 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3254 rmem->vmem = (void **)&txr->tx_buf_ring;
3255 }
3256 }
3258 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3262 struct rx_bd **rx_buf_ring;
3264 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3265 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3269 rxbd = rx_buf_ring[i];
3273 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3274 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3275 rxbd->rx_bd_opaque = prod;
3276 }
3277 }
3278 }
3280 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3282 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3283 struct net_device *dev = bp->dev;
3287 prod = rxr->rx_prod;
3288 for (i = 0; i < bp->rx_ring_size; i++) {
3289 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3290 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3291 ring_nr, i, bp->rx_ring_size);
3294 prod = NEXT_RX(prod);
3296 rxr->rx_prod = prod;
3298 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3301 prod = rxr->rx_agg_prod;
3302 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3303 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3304 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3305 ring_nr, i, bp->rx_ring_size);
3308 prod = NEXT_RX_AGG(prod);
3310 rxr->rx_agg_prod = prod;
3312 if (rxr->rx_tpa) {
3313 dma_addr_t mapping;
3314 u8 *data;
3316 for (i = 0; i < bp->max_tpa; i++) {
3317 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3318 if (!data)
3319 return -ENOMEM;
3321 rxr->rx_tpa[i].data = data;
3322 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3323 rxr->rx_tpa[i].mapping = mapping;
3324 }
3325 }
3326 return 0;
3327 }
3329 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3331 struct bnxt_rx_ring_info *rxr;
3332 struct bnxt_ring_struct *ring;
3335 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3336 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3338 if (NET_IP_ALIGN == 2)
3339 type |= RX_BD_FLAGS_SOP;
3341 rxr = &bp->rx_ring[ring_nr];
3342 ring = &rxr->rx_ring_struct;
3343 bnxt_init_rxbd_pages(ring, type);
3345 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3346 bpf_prog_add(bp->xdp_prog, 1);
3347 rxr->xdp_prog = bp->xdp_prog;
3349 ring->fw_ring_id = INVALID_HW_RING_ID;
3351 ring = &rxr->rx_agg_ring_struct;
3352 ring->fw_ring_id = INVALID_HW_RING_ID;
3354 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3355 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3356 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3358 bnxt_init_rxbd_pages(ring, type);
3359 }
3361 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3362 }
3364 static void bnxt_init_cp_rings(struct bnxt *bp)
3368 for (i = 0; i < bp->cp_nr_rings; i++) {
3369 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3370 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3372 ring->fw_ring_id = INVALID_HW_RING_ID;
3373 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3374 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3375 for (j = 0; j < 2; j++) {
3376 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3381 ring = &cpr2->cp_ring_struct;
3382 ring->fw_ring_id = INVALID_HW_RING_ID;
3383 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3384 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3385 }
3386 }
3387 }
3389 static int bnxt_init_rx_rings(struct bnxt *bp)
3391 int i, rc = 0;
3393 if (BNXT_RX_PAGE_MODE(bp)) {
3394 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3395 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3397 bp->rx_offset = BNXT_RX_OFFSET;
3398 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3399 }
3401 for (i = 0; i < bp->rx_nr_rings; i++) {
3402 rc = bnxt_init_one_rx_ring(bp, i);
3403 if (rc)
3404 return rc;
3405 }
3407 return 0;
3408 }
3410 static int bnxt_init_tx_rings(struct bnxt *bp)
3414 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3415 MAX_SKB_FRAGS + 1);
3417 for (i = 0; i < bp->tx_nr_rings; i++) {
3418 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3419 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3421 ring->fw_ring_id = INVALID_HW_RING_ID;
3422 }
3424 return 0;
3425 }
3427 static void bnxt_free_ring_grps(struct bnxt *bp)
3429 kfree(bp->grp_info);
3430 bp->grp_info = NULL;
3433 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3438 bp->grp_info = kcalloc(bp->cp_nr_rings,
3439 sizeof(struct bnxt_ring_grp_info),
3440 GFP_KERNEL);
3441 if (!bp->grp_info)
3442 return -ENOMEM;
3443 }
3444 for (i = 0; i < bp->cp_nr_rings; i++) {
3446 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3447 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3448 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3449 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3450 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3451 }
3452 return 0;
3453 }
3455 static void bnxt_free_vnics(struct bnxt *bp)
3457 kfree(bp->vnic_info);
3458 bp->vnic_info = NULL;
3462 static int bnxt_alloc_vnics(struct bnxt *bp)
3466 #ifdef CONFIG_RFS_ACCEL
3467 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3468 num_vnics += bp->rx_nr_rings;
3469 #endif
3471 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3472 num_vnics++;
3474 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3475 GFP_KERNEL);
3476 if (!bp->vnic_info)
3477 return -ENOMEM;
3479 bp->nr_vnics = num_vnics;
3480 return 0;
3481 }
3483 static void bnxt_init_vnics(struct bnxt *bp)
3487 for (i = 0; i < bp->nr_vnics; i++) {
3488 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3491 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3492 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3493 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3495 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3497 if (bp->vnic_info[i].rss_hash_key) {
3498 if (i == 0)
3499 prandom_bytes(vnic->rss_hash_key,
3500 HW_HASH_KEY_SIZE);
3501 else
3502 memcpy(vnic->rss_hash_key,
3503 bp->vnic_info[0].rss_hash_key,
3504 HW_HASH_KEY_SIZE);
3505 }
3506 }
3507 }
3509 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3513 pages = ring_size / desc_per_pg;
3520 while (pages & (pages - 1))
3521 pages++;
3523 return pages;
3524 }
3526 void bnxt_set_tpa_flags(struct bnxt *bp)
3528 bp->flags &= ~BNXT_FLAG_TPA;
3529 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3531 if (bp->dev->features & NETIF_F_LRO)
3532 bp->flags |= BNXT_FLAG_LRO;
3533 else if (bp->dev->features & NETIF_F_GRO_HW)
3534 bp->flags |= BNXT_FLAG_GRO;
3537 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3540 void bnxt_set_ring_params(struct bnxt *bp)
3542 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3543 u32 agg_factor = 0, agg_ring_size = 0;
3545 /* 8 for CRC and VLAN */
3546 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3548 rx_space = rx_size + NET_SKB_PAD +
3549 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3551 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3552 ring_size = bp->rx_ring_size;
3553 bp->rx_agg_ring_size = 0;
3554 bp->rx_agg_nr_pages = 0;
3556 if (bp->flags & BNXT_FLAG_TPA)
3557 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3559 bp->flags &= ~BNXT_FLAG_JUMBO;
3560 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3561 u32 jumbo_factor;
3563 bp->flags |= BNXT_FLAG_JUMBO;
3564 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3565 if (jumbo_factor > agg_factor)
3566 agg_factor = jumbo_factor;
3568 agg_ring_size = ring_size * agg_factor;
3570 if (agg_ring_size) {
3571 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3573 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3574 u32 tmp = agg_ring_size;
3576 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3577 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3578 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3579 tmp, agg_ring_size);
3581 bp->rx_agg_ring_size = agg_ring_size;
3582 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3583 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3584 rx_space = rx_size + NET_SKB_PAD +
3585 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3588 bp->rx_buf_use_size = rx_size;
3589 bp->rx_buf_size = rx_space;
3591 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3592 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3594 ring_size = bp->tx_ring_size;
3595 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3596 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3598 max_rx_cmpl = bp->rx_ring_size;
3599 /* MAX TPA needs to be added because TPA_START completions are
3600 * immediately recycled, so the TPA completions are not bound by
3603 if (bp->flags & BNXT_FLAG_TPA)
3604 max_rx_cmpl += bp->max_tpa;
3605 /* RX and TPA completions are 32-byte, all others are 16-byte */
3606 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3607 bp->cp_ring_size = ring_size;
3609 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3610 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3611 bp->cp_nr_pages = MAX_CP_PAGES;
3612 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3613 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3614 ring_size, bp->cp_ring_size);
3616 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3617 bp->cp_ring_mask = bp->cp_bit - 1;
3618 }
3620 /* Changing allocation mode of RX rings.
3621 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3623 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3625 if (page_mode) {
3626 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3627 return -EOPNOTSUPP;
3628 bp->dev->max_mtu =
3629 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3630 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3631 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3632 bp->rx_dir = DMA_BIDIRECTIONAL;
3633 bp->rx_skb_func = bnxt_rx_page_skb;
3634 /* Disable LRO or GRO_HW */
3635 netdev_update_features(bp->dev);
3636 } else {
3637 bp->dev->max_mtu = bp->max_mtu;
3638 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3639 bp->rx_dir = DMA_FROM_DEVICE;
3640 bp->rx_skb_func = bnxt_rx_skb;
3641 }
3642 return 0;
3643 }
3645 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3648 struct bnxt_vnic_info *vnic;
3649 struct pci_dev *pdev = bp->pdev;
3654 for (i = 0; i < bp->nr_vnics; i++) {
3655 vnic = &bp->vnic_info[i];
3657 kfree(vnic->fw_grp_ids);
3658 vnic->fw_grp_ids = NULL;
3660 kfree(vnic->uc_list);
3661 vnic->uc_list = NULL;
3663 if (vnic->mc_list) {
3664 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3665 vnic->mc_list, vnic->mc_list_mapping);
3666 vnic->mc_list = NULL;
3669 if (vnic->rss_table) {
3670 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3672 vnic->rss_table_dma_addr);
3673 vnic->rss_table = NULL;
3676 vnic->rss_hash_key = NULL;
3681 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3683 int i, rc = 0, size;
3684 struct bnxt_vnic_info *vnic;
3685 struct pci_dev *pdev = bp->pdev;
3688 for (i = 0; i < bp->nr_vnics; i++) {
3689 vnic = &bp->vnic_info[i];
3691 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3692 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3695 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3696 if (!vnic->uc_list) {
3703 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3704 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3706 dma_alloc_coherent(&pdev->dev,
3708 &vnic->mc_list_mapping,
3710 if (!vnic->mc_list) {
3716 if (bp->flags & BNXT_FLAG_CHIP_P5)
3717 goto vnic_skip_grps;
3719 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3720 max_rings = bp->rx_nr_rings;
3724 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3725 if (!vnic->fw_grp_ids) {
3730 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3731 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3734 /* Allocate rss table and hash key */
3735 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3736 if (bp->flags & BNXT_FLAG_CHIP_P5)
3737 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3739 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3740 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3741 vnic->rss_table_size,
3742 &vnic->rss_table_dma_addr,
3744 if (!vnic->rss_table) {
3749 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3750 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3758 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3760 struct pci_dev *pdev = bp->pdev;
3762 if (bp->hwrm_cmd_resp_addr) {
3763 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3764 bp->hwrm_cmd_resp_dma_addr);
3765 bp->hwrm_cmd_resp_addr = NULL;
3768 if (bp->hwrm_cmd_kong_resp_addr) {
3769 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3770 bp->hwrm_cmd_kong_resp_addr,
3771 bp->hwrm_cmd_kong_resp_dma_addr);
3772 bp->hwrm_cmd_kong_resp_addr = NULL;
3776 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3778 struct pci_dev *pdev = bp->pdev;
3780 if (bp->hwrm_cmd_kong_resp_addr)
3783 bp->hwrm_cmd_kong_resp_addr =
3784 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3785 &bp->hwrm_cmd_kong_resp_dma_addr,
3787 if (!bp->hwrm_cmd_kong_resp_addr)
3793 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3795 struct pci_dev *pdev = bp->pdev;
3797 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3798 &bp->hwrm_cmd_resp_dma_addr,
3800 if (!bp->hwrm_cmd_resp_addr)
3806 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3808 if (bp->hwrm_short_cmd_req_addr) {
3809 struct pci_dev *pdev = bp->pdev;
3811 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3812 bp->hwrm_short_cmd_req_addr,
3813 bp->hwrm_short_cmd_req_dma_addr);
3814 bp->hwrm_short_cmd_req_addr = NULL;
3818 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3820 struct pci_dev *pdev = bp->pdev;
3822 if (bp->hwrm_short_cmd_req_addr)
3825 bp->hwrm_short_cmd_req_addr =
3826 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3827 &bp->hwrm_short_cmd_req_dma_addr,
3829 if (!bp->hwrm_short_cmd_req_addr)
3835 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3837 kfree(stats->hw_masks);
3838 stats->hw_masks = NULL;
3839 kfree(stats->sw_stats);
3840 stats->sw_stats = NULL;
3841 if (stats->hw_stats) {
3842 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3843 stats->hw_stats_map);
3844 stats->hw_stats = NULL;
3848 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3851 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3852 &stats->hw_stats_map, GFP_KERNEL);
3853 if (!stats->hw_stats)
3856 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3857 if (!stats->sw_stats)
3861 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3862 if (!stats->hw_masks)
3868 bnxt_free_stats_mem(bp, stats);
3872 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3876 for (i = 0; i < count; i++)
3880 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3884 for (i = 0; i < count; i++)
3885 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3888 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3889 struct bnxt_stats_mem *stats)
3891 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3892 struct hwrm_func_qstats_ext_input req = {0};
3896 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3897 !(bp->flags & BNXT_FLAG_CHIP_P5))
3900 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3901 req.fid = cpu_to_le16(0xffff);
3902 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3903 mutex_lock(&bp->hwrm_cmd_lock);
3904 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3905 if (rc)
3906 goto qstat_exit;
3908 hw_masks = &resp->rx_ucast_pkts;
3909 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3911 qstat_exit:
3912 mutex_unlock(&bp->hwrm_cmd_lock);
3913 return rc;
3914 }
3916 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3917 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3919 static void bnxt_init_stats(struct bnxt *bp)
3921 struct bnxt_napi *bnapi = bp->bnapi[0];
3922 struct bnxt_cp_ring_info *cpr;
3923 struct bnxt_stats_mem *stats;
3924 __le64 *rx_stats, *tx_stats;
3925 int rc, rx_count, tx_count;
3926 u64 *rx_masks, *tx_masks;
3930 cpr = &bnapi->cp_ring;
3931 stats = &cpr->stats;
3932 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3933 if (rc) {
3934 if (bp->flags & BNXT_FLAG_CHIP_P5)
3935 mask = (1ULL << 48) - 1;
3936 else
3937 mask = -1ULL;
3938 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3939 }
3940 if (bp->flags & BNXT_FLAG_PORT_STATS) {
3941 stats = &bp->port_stats;
3942 rx_stats = stats->hw_stats;
3943 rx_masks = stats->hw_masks;
3944 rx_count = sizeof(struct rx_port_stats) / 8;
3945 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3946 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3947 tx_count = sizeof(struct tx_port_stats) / 8;
3949 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3950 rc = bnxt_hwrm_port_qstats(bp, flags);
3951 if (rc) {
3952 mask = (1ULL << 40) - 1;
3954 bnxt_fill_masks(rx_masks, mask, rx_count);
3955 bnxt_fill_masks(tx_masks, mask, tx_count);
3956 } else {
3957 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3958 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3959 bnxt_hwrm_port_qstats(bp, 0);
3960 }
3961 }
3962 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3963 stats = &bp->rx_port_stats_ext;
3964 rx_stats = stats->hw_stats;
3965 rx_masks = stats->hw_masks;
3966 rx_count = sizeof(struct rx_port_stats_ext) / 8;
3967 stats = &bp->tx_port_stats_ext;
3968 tx_stats = stats->hw_stats;
3969 tx_masks = stats->hw_masks;
3970 tx_count = sizeof(struct tx_port_stats_ext) / 8;
3972 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3973 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
3974 if (rc) {
3975 mask = (1ULL << 40) - 1;
3977 bnxt_fill_masks(rx_masks, mask, rx_count);
3978 if (bp->flags & BNXT_FLAG_PORT_STATS)
3979 bnxt_fill_masks(tx_masks, mask, tx_count);
3980 } else {
3981 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3982 if (bp->flags & BNXT_FLAG_PORT_STATS)
3983 bnxt_copy_hw_masks(tx_masks, tx_stats,
3984 tx_count);
3985 bnxt_hwrm_port_qstats_ext(bp, 0);
3986 }
3987 }
3988 }
3990 static void bnxt_free_port_stats(struct bnxt *bp)
3992 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3993 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3995 bnxt_free_stats_mem(bp, &bp->port_stats);
3996 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
3997 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4000 static void bnxt_free_ring_stats(struct bnxt *bp)
4007 for (i = 0; i < bp->cp_nr_rings; i++) {
4008 struct bnxt_napi *bnapi = bp->bnapi[i];
4009 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4011 bnxt_free_stats_mem(bp, &cpr->stats);
4015 static int bnxt_alloc_stats(struct bnxt *bp)
4020 size = bp->hw_ring_stats_size;
4022 for (i = 0; i < bp->cp_nr_rings; i++) {
4023 struct bnxt_napi *bnapi = bp->bnapi[i];
4024 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4026 cpr->stats.len = size;
4027 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4028 if (rc)
4029 return rc;
4031 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4032 }
4034 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4035 return 0;
4037 if (bp->port_stats.hw_stats)
4038 goto alloc_ext_stats;
4040 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4041 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4042 if (rc)
4043 return rc;
4045 bp->flags |= BNXT_FLAG_PORT_STATS;
4047 alloc_ext_stats:
4048 /* Display extended statistics only if FW supports it */
4049 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4050 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4051 return 0;
4053 if (bp->rx_port_stats_ext.hw_stats)
4054 goto alloc_tx_ext_stats;
4056 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4057 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4058 /* Extended stats are optional */
4059 if (rc)
4060 return 0;
4062 alloc_tx_ext_stats:
4063 if (bp->tx_port_stats_ext.hw_stats)
4066 if (bp->hwrm_spec_code >= 0x10902 ||
4067 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4068 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4069 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4070 /* Extended stats are optional */
4071 if (rc)
4072 return 0;
4073 }
4074 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4075 return 0;
4076 }
4078 static void bnxt_clear_ring_indices(struct bnxt *bp)
4085 for (i = 0; i < bp->cp_nr_rings; i++) {
4086 struct bnxt_napi *bnapi = bp->bnapi[i];
4087 struct bnxt_cp_ring_info *cpr;
4088 struct bnxt_rx_ring_info *rxr;
4089 struct bnxt_tx_ring_info *txr;
4094 cpr = &bnapi->cp_ring;
4095 cpr->cp_raw_cons = 0;
4097 txr = bnapi->tx_ring;
4103 rxr = bnapi->rx_ring;
4106 rxr->rx_agg_prod = 0;
4107 rxr->rx_sw_agg_prod = 0;
4108 rxr->rx_next_cons = 0;
4113 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4115 #ifdef CONFIG_RFS_ACCEL
4118 /* We are under rtnl_lock and all our NAPIs have been disabled.
4119 * It is safe to delete the hash table.
4120 */
4121 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4122 struct hlist_head *head;
4123 struct hlist_node *tmp;
4124 struct bnxt_ntuple_filter *fltr;
4126 head = &bp->ntp_fltr_hash_tbl[i];
4127 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4128 hlist_del(&fltr->hash);
4129 kfree(fltr);
4130 }
4131 }
4132 if (irq_reinit) {
4133 kfree(bp->ntp_fltr_bmap);
4134 bp->ntp_fltr_bmap = NULL;
4135 }
4136 bp->ntp_fltr_count = 0;
4137 #endif
4138 }
4140 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4142 #ifdef CONFIG_RFS_ACCEL
4143 int i, rc = 0;
4145 if (!(bp->flags & BNXT_FLAG_RFS))
4146 return 0;
4148 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4149 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4151 bp->ntp_fltr_count = 0;
4152 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4153 sizeof(long),
4154 GFP_KERNEL);
4156 if (!bp->ntp_fltr_bmap)
4157 rc = -ENOMEM;
4159 return rc;
4160 #else
4161 return 0;
4162 #endif
4163 }
4165 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4167 bnxt_free_vnic_attributes(bp);
4168 bnxt_free_tx_rings(bp);
4169 bnxt_free_rx_rings(bp);
4170 bnxt_free_cp_rings(bp);
4171 bnxt_free_ntp_fltrs(bp, irq_re_init);
4173 bnxt_free_ring_stats(bp);
4174 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4175 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4176 bnxt_free_port_stats(bp);
4177 bnxt_free_ring_grps(bp);
4178 bnxt_free_vnics(bp);
4179 kfree(bp->tx_ring_map);
4180 bp->tx_ring_map = NULL;
4188 bnxt_clear_ring_indices(bp);
4192 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4194 int i, j, rc, size, arr_size;
4198 /* Allocate bnapi mem pointer array and mem block for
4201 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4203 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4204 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4210 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4211 bp->bnapi[i] = bnapi;
4212 bp->bnapi[i]->index = i;
4213 bp->bnapi[i]->bp = bp;
4214 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4215 struct bnxt_cp_ring_info *cpr =
4216 &bp->bnapi[i]->cp_ring;
4218 cpr->cp_ring_struct.ring_mem.flags =
4219 BNXT_RMEM_RING_PTE_FLAG;
4223 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4224 sizeof(struct bnxt_rx_ring_info),
4229 for (i = 0; i < bp->rx_nr_rings; i++) {
4230 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4232 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4233 rxr->rx_ring_struct.ring_mem.flags =
4234 BNXT_RMEM_RING_PTE_FLAG;
4235 rxr->rx_agg_ring_struct.ring_mem.flags =
4236 BNXT_RMEM_RING_PTE_FLAG;
4238 rxr->bnapi = bp->bnapi[i];
4239 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4242 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4243 sizeof(struct bnxt_tx_ring_info),
4248 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4251 if (!bp->tx_ring_map)
4254 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4257 j = bp->rx_nr_rings;
4259 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4260 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4262 if (bp->flags & BNXT_FLAG_CHIP_P5)
4263 txr->tx_ring_struct.ring_mem.flags =
4264 BNXT_RMEM_RING_PTE_FLAG;
4265 txr->bnapi = bp->bnapi[j];
4266 bp->bnapi[j]->tx_ring = txr;
4267 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4268 if (i >= bp->tx_nr_rings_xdp) {
4269 txr->txq_index = i - bp->tx_nr_rings_xdp;
4270 bp->bnapi[j]->tx_int = bnxt_tx_int;
4272 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4273 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4277 rc = bnxt_alloc_stats(bp);
4280 bnxt_init_stats(bp);
4282 rc = bnxt_alloc_ntp_fltrs(bp);
4286 rc = bnxt_alloc_vnics(bp);
4291 bnxt_init_ring_struct(bp);
4293 rc = bnxt_alloc_rx_rings(bp);
4297 rc = bnxt_alloc_tx_rings(bp);
4301 rc = bnxt_alloc_cp_rings(bp);
4305 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4306 BNXT_VNIC_UCAST_FLAG;
4307 rc = bnxt_alloc_vnic_attributes(bp);
4313 bnxt_free_mem(bp, true);
4317 static void bnxt_disable_int(struct bnxt *bp)
4324 for (i = 0; i < bp->cp_nr_rings; i++) {
4325 struct bnxt_napi *bnapi = bp->bnapi[i];
4326 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4327 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4329 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4330 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4334 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4336 struct bnxt_napi *bnapi = bp->bnapi[n];
4337 struct bnxt_cp_ring_info *cpr;
4339 cpr = &bnapi->cp_ring;
4340 return cpr->cp_ring_struct.map_idx;
4343 static void bnxt_disable_int_sync(struct bnxt *bp)
4350 atomic_inc(&bp->intr_sem);
4352 bnxt_disable_int(bp);
4353 for (i = 0; i < bp->cp_nr_rings; i++) {
4354 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4356 synchronize_irq(bp->irq_tbl[map_idx].vector);
4360 static void bnxt_enable_int(struct bnxt *bp)
4364 atomic_set(&bp->intr_sem, 0);
4365 for (i = 0; i < bp->cp_nr_rings; i++) {
4366 struct bnxt_napi *bnapi = bp->bnapi[i];
4367 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4369 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4373 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4374 u16 cmpl_ring, u16 target_id)
4376 struct input *req = request;
4378 req->req_type = cpu_to_le16(req_type);
4379 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4380 req->target_id = cpu_to_le16(target_id);
4381 if (bnxt_kong_hwrm_message(bp, req))
4382 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4384 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4385 }
4387 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4389 switch (hwrm_err) {
4390 case HWRM_ERR_CODE_SUCCESS:
4391 return 0;
4392 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4393 return -EROFS;
4394 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4395 return -EACCES;
4396 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4397 return -ENOSPC;
4398 case HWRM_ERR_CODE_INVALID_PARAMS:
4399 case HWRM_ERR_CODE_INVALID_FLAGS:
4400 case HWRM_ERR_CODE_INVALID_ENABLES:
4401 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4402 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4403 return -EINVAL;
4404 case HWRM_ERR_CODE_NO_BUFFER:
4405 return -ENOMEM;
4406 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4407 case HWRM_ERR_CODE_BUSY:
4408 return -EAGAIN;
4409 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4410 return -EOPNOTSUPP;
4411 default:
4412 return -EIO;
4413 }
4414 }
4416 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4417 int timeout, bool silent)
4419 int i, intr_process, rc, tmo_count;
4420 struct input *req = msg;
4423 u16 cp_ring_id, len = 0;
4424 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4425 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4426 struct hwrm_short_input short_input = {0};
4427 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4428 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4429 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4431 if (BNXT_NO_FW_ACCESS(bp) &&
4432 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4433 return -EBUSY;
4435 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4436 if (msg_len > bp->hwrm_max_ext_req_len ||
4437 !bp->hwrm_short_cmd_req_addr)
4438 return -EINVAL;
4439 }
4441 if (bnxt_hwrm_kong_chnl(bp, req)) {
4442 dst = BNXT_HWRM_CHNL_KONG;
4443 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4444 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4445 resp = bp->hwrm_cmd_kong_resp_addr;
4448 memset(resp, 0, PAGE_SIZE);
4449 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4450 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4452 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4453 /* currently supports only one outstanding message */
4454 if (intr_process)
4455 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4457 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4458 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4459 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4462 /* Set boundary for maximum extended request length for short
4463 * cmd format.  If passed up from the device, use the max supported
4464 * internal req length.
4465 */
4466 max_msg_len = bp->hwrm_max_ext_req_len;
4468 memcpy(short_cmd_req, req, msg_len);
4469 if (msg_len < max_msg_len)
4470 memset(short_cmd_req + msg_len, 0,
4471 max_msg_len - msg_len);
4473 short_input.req_type = req->req_type;
4474 short_input.signature =
4475 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4476 short_input.size = cpu_to_le16(msg_len);
4477 short_input.req_addr =
4478 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4480 data = (u32 *)&short_input;
4481 msg_len = sizeof(short_input);
4483 /* Sync memory write before updating doorbell */
4484 wmb();
4486 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4489 /* Write request msg to hwrm channel */
4490 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4492 for (i = msg_len; i < max_req_len; i += 4)
4493 writel(0, bp->bar0 + bar_offset + i);
4495 /* Ring channel doorbell */
4496 writel(1, bp->bar0 + doorbell_offset);
4498 if (!pci_is_enabled(bp->pdev))
4499 return -ENODEV;
4501 if (!timeout)
4502 timeout = DFLT_HWRM_CMD_TIMEOUT;
4503 /* Limit timeout to an upper limit */
4504 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
4505 /* convert timeout to usec */
4506 timeout *= 1000;
4508 i = 0;
4509 /* Short timeout for the first few iterations:
4510 * number of loops = number of loops for short timeout +
4511 * number of loops for standard timeout.
4512 */
4513 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4514 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4515 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4517 if (intr_process) {
4518 u16 seq_id = bp->hwrm_intr_seq_id;
4520 /* Wait until hwrm response cmpl interrupt is processed */
4521 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4522 i++ < tmo_count) {
4523 /* Abort the wait for completion if the FW health
4524 * check has failed.
4525 */
4526 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4527 return -EBUSY;
4528 /* on first few passes, just barely sleep */
4529 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4530 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4531 HWRM_SHORT_MAX_TIMEOUT);
4532 } else {
4533 if (HWRM_WAIT_MUST_ABORT(bp, req))
4534 break;
4535 usleep_range(HWRM_MIN_TIMEOUT,
4536 HWRM_MAX_TIMEOUT);
4537 }
4538 }
4540 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4542 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4543 le16_to_cpu(req->req_type));
4546 len = le16_to_cpu(resp->resp_len);
4547 valid = ((u8 *)resp) + len - 1;
4548 } else {
4549 int j;
4551 /* Check if response len is updated */
4552 for (i = 0; i < tmo_count; i++) {
4553 /* Abort the wait for completion if the FW health
4554 * check has failed.
4555 */
4556 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4557 goto timeout_abort;
4558 len = le16_to_cpu(resp->resp_len);
4559 if (len)
4560 break;
4561 /* on first few passes, just barely sleep */
4562 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4563 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4564 HWRM_SHORT_MAX_TIMEOUT);
4565 } else {
4566 if (HWRM_WAIT_MUST_ABORT(bp, req))
4567 goto timeout_abort;
4568 usleep_range(HWRM_MIN_TIMEOUT,
4569 HWRM_MAX_TIMEOUT);
4570 }
4571 }
4573 if (i >= tmo_count) {
4574 timeout_abort:
4575 if (!silent)
4576 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4577 HWRM_TOTAL_TIMEOUT(i),
4578 le16_to_cpu(req->req_type),
4579 le16_to_cpu(req->seq_id), len);
4580 return -EBUSY;
4581 }
4583 /* Last byte of resp contains valid bit */
4584 valid = ((u8 *)resp) + len - 1;
4585 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4586 /* make sure we read from updated DMA memory */
4587 dma_rmb();
4588 if (*valid)
4589 break;
4590 usleep_range(1, 5);
4591 }
4593 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4594 if (!silent)
4595 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4596 HWRM_TOTAL_TIMEOUT(i),
4597 le16_to_cpu(req->req_type),
4598 le16_to_cpu(req->seq_id), len,
4599 *valid);
4600 return -EBUSY;
4601 }
4602 }
4604 /* Zero valid bit for compatibility. Valid bit in an older spec
4605 * may become a new field in a newer spec. We must make sure that
4606 * a new field not implemented by old spec will read zero.
4607 */
4608 *valid = 0;
4609 rc = le16_to_cpu(resp->error_code);
4610 if (rc && !silent)
4611 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4612 le16_to_cpu(resp->req_type),
4613 le16_to_cpu(resp->seq_id), rc);
4614 return bnxt_hwrm_to_stderr(rc);
4615 }
4617 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4619 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4620 }
4622 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4625 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4626 }
4628 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4632 mutex_lock(&bp->hwrm_cmd_lock);
4633 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4634 mutex_unlock(&bp->hwrm_cmd_lock);
4635 return rc;
4636 }
4638 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4643 mutex_lock(&bp->hwrm_cmd_lock);
4644 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4645 mutex_unlock(&bp->hwrm_cmd_lock);
4646 return rc;
4647 }
4649 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4652 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4653 struct hwrm_func_drv_rgtr_input req = {0};
4654 DECLARE_BITMAP(async_events_bmap, 256);
4655 u32 *events = (u32 *)async_events_bmap;
4659 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4662 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4663 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4664 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4666 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4667 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4668 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4669 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4670 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4671 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4672 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4673 req.flags = cpu_to_le32(flags);
4674 req.ver_maj_8b = DRV_VER_MAJ;
4675 req.ver_min_8b = DRV_VER_MIN;
4676 req.ver_upd_8b = DRV_VER_UPD;
4677 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4678 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4679 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4685 memset(data, 0, sizeof(data));
4686 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4687 u16 cmd = bnxt_vf_req_snif[i];
4688 unsigned int bit, idx;
4692 data[idx] |= 1 << bit;
4695 for (i = 0; i < 8; i++)
4696 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4699 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4702 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4703 req.flags |= cpu_to_le32(
4704 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4706 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4707 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4708 u16 event_id = bnxt_async_events_arr[i];
4710 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4711 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4713 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4715 if (bmap && bmap_size) {
4716 for (i = 0; i < bmap_size; i++) {
4717 if (test_bit(i, bmap))
4718 __set_bit(i, async_events_bmap);
4721 for (i = 0; i < 8; i++)
4722 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4726 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4728 mutex_lock(&bp->hwrm_cmd_lock);
4729 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4731 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4733 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4734 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4736 mutex_unlock(&bp->hwrm_cmd_lock);
4740 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4742 struct hwrm_func_drv_unrgtr_input req = {0};
4744 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4747 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4748 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4751 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4754 struct hwrm_tunnel_dst_port_free_input req = {0};
4756 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4757 req.tunnel_type = tunnel_type;
4759 switch (tunnel_type) {
4760 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4761 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4762 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4764 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4765 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4766 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4772 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4774 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4779 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4783 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4784 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4786 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4788 req.tunnel_type = tunnel_type;
4789 req.tunnel_dst_port_val = port;
4791 mutex_lock(&bp->hwrm_cmd_lock);
4792 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4794 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4799 switch (tunnel_type) {
4800 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4801 bp->vxlan_fw_dst_port_id =
4802 le16_to_cpu(resp->tunnel_dst_port_id);
4804 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4805 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4812 mutex_unlock(&bp->hwrm_cmd_lock);
4816 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4818 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4819 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4821 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4822 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4824 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4825 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4826 req.mask = cpu_to_le32(vnic->rx_mask);
4827 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4830 #ifdef CONFIG_RFS_ACCEL
4831 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4832 struct bnxt_ntuple_filter *fltr)
4834 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4836 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4837 req.ntuple_filter_id = fltr->filter_id;
4838 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4841 #define BNXT_NTP_FLTR_FLAGS \
4842 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4843 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4844 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4845 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4846 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4847 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4848 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4849 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4850 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4851 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4852 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4853 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4854 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4855 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4857 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4858 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4860 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4861 struct bnxt_ntuple_filter *fltr)
4863 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4864 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4865 struct flow_keys *keys = &fltr->fkeys;
4866 struct bnxt_vnic_info *vnic;
4870 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4871 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4873 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4874 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4875 req.dst_id = cpu_to_le16(fltr->rxq);
4877 vnic = &bp->vnic_info[fltr->rxq + 1];
4878 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4880 req.flags = cpu_to_le32(flags);
4881 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4883 req.ethertype = htons(ETH_P_IP);
4884 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4885 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4886 req.ip_protocol = keys->basic.ip_proto;
4888 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4891 req.ethertype = htons(ETH_P_IPV6);
4893 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4894 *(struct in6_addr *)&req.src_ipaddr[0] =
4895 keys->addrs.v6addrs.src;
4896 *(struct in6_addr *)&req.dst_ipaddr[0] =
4897 keys->addrs.v6addrs.dst;
4898 for (i = 0; i < 4; i++) {
4899 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4900 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4903 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4904 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4905 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4906 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4908 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4909 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4911 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4914 req.src_port = keys->ports.src;
4915 req.src_port_mask = cpu_to_be16(0xffff);
4916 req.dst_port = keys->ports.dst;
4917 req.dst_port_mask = cpu_to_be16(0xffff);
4919 mutex_lock(&bp->hwrm_cmd_lock);
4920 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4922 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4923 fltr->filter_id = resp->ntuple_filter_id;
4925 mutex_unlock(&bp->hwrm_cmd_lock);
4930 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4934 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4935 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4937 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4938 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4939 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4941 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4942 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4944 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4945 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4946 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4947 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4948 req.l2_addr_mask[0] = 0xff;
4949 req.l2_addr_mask[1] = 0xff;
4950 req.l2_addr_mask[2] = 0xff;
4951 req.l2_addr_mask[3] = 0xff;
4952 req.l2_addr_mask[4] = 0xff;
4953 req.l2_addr_mask[5] = 0xff;
4955 mutex_lock(&bp->hwrm_cmd_lock);
4956 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4958 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4960 mutex_unlock(&bp->hwrm_cmd_lock);
4964 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4966 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4969 /* Any associated ntuple filters will also be cleared by firmware. */
4970 mutex_lock(&bp->hwrm_cmd_lock);
4971 for (i = 0; i < num_of_vnics; i++) {
4972 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4974 for (j = 0; j < vnic->uc_filter_count; j++) {
4975 struct hwrm_cfa_l2_filter_free_input req = {0};
4977 bnxt_hwrm_cmd_hdr_init(bp, &req,
4978 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4980 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4982 rc = _hwrm_send_message(bp, &req, sizeof(req),
4985 vnic->uc_filter_count = 0;
4987 mutex_unlock(&bp->hwrm_cmd_lock);
4992 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4994 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4995 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4996 struct hwrm_vnic_tpa_cfg_input req = {0};
4998 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5001 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5004 u16 mss = bp->dev->mtu - 40;
5005 u32 nsegs, n, segs = 0, flags;
5007 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5008 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5009 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5010 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5011 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5012 if (tpa_flags & BNXT_FLAG_GRO)
5013 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5015 req.flags = cpu_to_le32(flags);
5018 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5019 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5020 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5022 /* Number of segs are log2 units, and first packet is not
5023 * included as part of this units.
5025 if (mss <= BNXT_RX_PAGE_SIZE) {
5026 n = BNXT_RX_PAGE_SIZE / mss;
5027 nsegs = (MAX_SKB_FRAGS - 1) * n;
5029 n = mss / BNXT_RX_PAGE_SIZE;
5030 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5032 nsegs = (MAX_SKB_FRAGS - n) / n;
5035 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5036 segs = MAX_TPA_SEGS_P5;
5037 max_aggs = bp->max_tpa;
5039 segs = ilog2(nsegs);
5041 req.max_agg_segs = cpu_to_le16(segs);
5042 req.max_aggs = cpu_to_le16(max_aggs);
5044 req.min_agg_len = cpu_to_le32(512);
5046 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5048 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5051 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5053 struct bnxt_ring_grp_info *grp_info;
5055 grp_info = &bp->grp_info[ring->grp_idx];
5056 return grp_info->cp_fw_ring_id;
5059 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5061 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5062 struct bnxt_napi *bnapi = rxr->bnapi;
5063 struct bnxt_cp_ring_info *cpr;
5065 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5066 return cpr->cp_ring_struct.fw_ring_id;
5068 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5072 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5074 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5075 struct bnxt_napi *bnapi = txr->bnapi;
5076 struct bnxt_cp_ring_info *cpr;
5078 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5079 return cpr->cp_ring_struct.fw_ring_id;
5081 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5085 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5089 if (bp->flags & BNXT_FLAG_CHIP_P5)
5090 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5092 entries = HW_HASH_INDEX_SIZE;
5094 bp->rss_indir_tbl_entries = entries;
5095 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5097 if (!bp->rss_indir_tbl)
5102 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5104 u16 max_rings, max_entries, pad, i;
5106 if (!bp->rx_nr_rings)
5109 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5110 max_rings = bp->rx_nr_rings - 1;
5112 max_rings = bp->rx_nr_rings;
5114 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5116 for (i = 0; i < max_entries; i++)
5117 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5119 pad = bp->rss_indir_tbl_entries - max_entries;
5121 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5124 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5126 u16 i, tbl_size, max_ring = 0;
5128 if (!bp->rss_indir_tbl)
5131 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5132 for (i = 0; i < tbl_size; i++)
5133 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5137 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5139 if (bp->flags & BNXT_FLAG_CHIP_P5)
5140 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5141 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5146 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5148 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5151 /* Fill the RSS indirection table with ring group ids */
5152 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5154 j = bp->rss_indir_tbl[i];
5155 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5159 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5160 struct bnxt_vnic_info *vnic)
5162 __le16 *ring_tbl = vnic->rss_table;
5163 struct bnxt_rx_ring_info *rxr;
5166 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5168 for (i = 0; i < tbl_size; i++) {
5171 j = bp->rss_indir_tbl[i];
5172 rxr = &bp->rx_ring[j];
5174 ring_id = rxr->rx_ring_struct.fw_ring_id;
5175 *ring_tbl++ = cpu_to_le16(ring_id);
5176 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5177 *ring_tbl++ = cpu_to_le16(ring_id);
5181 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5183 if (bp->flags & BNXT_FLAG_CHIP_P5)
5184 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5186 __bnxt_fill_hw_rss_tbl(bp, vnic);
5189 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5191 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5192 struct hwrm_vnic_rss_cfg_input req = {0};
5194 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5195 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5198 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5200 bnxt_fill_hw_rss_tbl(bp, vnic);
5201 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5202 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5203 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5204 req.hash_key_tbl_addr =
5205 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5207 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5208 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5211 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5213 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5214 struct hwrm_vnic_rss_cfg_input req = {0};
5215 dma_addr_t ring_tbl_map;
5218 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5219 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5221 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5224 bnxt_fill_hw_rss_tbl(bp, vnic);
5225 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5226 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5227 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5228 ring_tbl_map = vnic->rss_table_dma_addr;
5229 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5230 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5233 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5234 req.ring_table_pair_index = i;
5235 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5236 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5243 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5245 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5246 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5248 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5249 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5250 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5251 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5253 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5254 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5255 /* thresholds not implemented in firmware yet */
5256 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5257 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5258 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5259 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5262 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5265 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5267 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5268 req.rss_cos_lb_ctx_id =
5269 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5271 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5272 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5275 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5279 for (i = 0; i < bp->nr_vnics; i++) {
5280 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5282 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5283 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5284 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5287 bp->rsscos_nr_ctxs = 0;
5290 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5293 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5294 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5295 bp->hwrm_cmd_resp_addr;
5297 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5300 mutex_lock(&bp->hwrm_cmd_lock);
5301 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5303 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5304 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5305 mutex_unlock(&bp->hwrm_cmd_lock);
5310 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5312 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5313 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5314 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5317 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5319 unsigned int ring = 0, grp_idx;
5320 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5321 struct hwrm_vnic_cfg_input req = {0};
5324 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5326 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5327 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5329 req.default_rx_ring_id =
5330 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5331 req.default_cmpl_ring_id =
5332 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5334 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5335 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5338 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5339 /* Only RSS support for now TBD: COS & LB */
5340 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5341 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5342 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5343 VNIC_CFG_REQ_ENABLES_MRU);
5344 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5346 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5347 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5348 VNIC_CFG_REQ_ENABLES_MRU);
5349 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5351 req.rss_rule = cpu_to_le16(0xffff);
5354 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5355 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5356 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5357 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5359 req.cos_rule = cpu_to_le16(0xffff);
5362 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5364 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5366 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5367 ring = bp->rx_nr_rings - 1;
5369 grp_idx = bp->rx_ring[ring].bnapi->index;
5370 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5371 req.lb_rule = cpu_to_le16(0xffff);
5373 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5375 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5376 #ifdef CONFIG_BNXT_SRIOV
5378 def_vlan = bp->vf.vlan;
5380 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5381 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5382 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5383 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5385 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5388 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5390 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5391 struct hwrm_vnic_free_input req = {0};
5393 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5395 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5397 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5398 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5402 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5406 for (i = 0; i < bp->nr_vnics; i++)
5407 bnxt_hwrm_vnic_free_one(bp, i);
5410 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5411 unsigned int start_rx_ring_idx,
5412 unsigned int nr_rings)
5415 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5416 struct hwrm_vnic_alloc_input req = {0};
5417 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5418 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5420 if (bp->flags & BNXT_FLAG_CHIP_P5)
5421 goto vnic_no_ring_grps;
5423 /* map ring groups to this vnic */
5424 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5425 grp_idx = bp->rx_ring[i].bnapi->index;
5426 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5427 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5431 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5435 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5436 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5438 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5440 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5442 mutex_lock(&bp->hwrm_cmd_lock);
5443 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5445 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5446 mutex_unlock(&bp->hwrm_cmd_lock);
5450 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5452 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5453 struct hwrm_vnic_qcaps_input req = {0};
5456 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5457 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5458 if (bp->hwrm_spec_code < 0x10600)
5461 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5462 mutex_lock(&bp->hwrm_cmd_lock);
5463 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5465 u32 flags = le32_to_cpu(resp->flags);
5467 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5468 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5469 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5471 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5472 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5474 /* Older P5 fw before EXT_HW_STATS support did not set
5475 * VLAN_STRIP_CAP properly.
5477 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5478 (BNXT_CHIP_P5_THOR(bp) &&
5479 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5480 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5481 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5482 if (bp->max_tpa_v2) {
5483 if (BNXT_CHIP_P5_THOR(bp))
5484 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5486 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5489 mutex_unlock(&bp->hwrm_cmd_lock);
5493 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5498 if (bp->flags & BNXT_FLAG_CHIP_P5)
5501 mutex_lock(&bp->hwrm_cmd_lock);
5502 for (i = 0; i < bp->rx_nr_rings; i++) {
5503 struct hwrm_ring_grp_alloc_input req = {0};
5504 struct hwrm_ring_grp_alloc_output *resp =
5505 bp->hwrm_cmd_resp_addr;
5506 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5508 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5510 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5511 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5512 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5513 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5515 rc = _hwrm_send_message(bp, &req, sizeof(req),
5520 bp->grp_info[grp_idx].fw_grp_id =
5521 le32_to_cpu(resp->ring_group_id);
5523 mutex_unlock(&bp->hwrm_cmd_lock);
5527 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5530 struct hwrm_ring_grp_free_input req = {0};
5532 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5535 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5537 mutex_lock(&bp->hwrm_cmd_lock);
5538 for (i = 0; i < bp->cp_nr_rings; i++) {
5539 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5542 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5544 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5545 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5547 mutex_unlock(&bp->hwrm_cmd_lock);
5550 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5551 struct bnxt_ring_struct *ring,
5552 u32 ring_type, u32 map_index)
5554 int rc = 0, err = 0;
5555 struct hwrm_ring_alloc_input req = {0};
5556 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5557 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5558 struct bnxt_ring_grp_info *grp_info;
5561 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5564 if (rmem->nr_pages > 1) {
5565 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5566 /* Page size is in log2 units */
5567 req.page_size = BNXT_PAGE_SHIFT;
5568 req.page_tbl_depth = 1;
5570 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5573 /* Association of ring index with doorbell index and MSIX number */
5574 req.logical_id = cpu_to_le16(map_index);
5576 switch (ring_type) {
5577 case HWRM_RING_ALLOC_TX: {
5578 struct bnxt_tx_ring_info *txr;
5580 txr = container_of(ring, struct bnxt_tx_ring_info,
5582 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5583 /* Association of transmit ring with completion ring */
5584 grp_info = &bp->grp_info[ring->grp_idx];
5585 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5586 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5587 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5588 req.queue_id = cpu_to_le16(ring->queue_id);
5591 case HWRM_RING_ALLOC_RX:
5592 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5593 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5594 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5597 /* Association of rx ring with stats context */
5598 grp_info = &bp->grp_info[ring->grp_idx];
5599 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5600 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5601 req.enables |= cpu_to_le32(
5602 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5603 if (NET_IP_ALIGN == 2)
5604 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5605 req.flags = cpu_to_le16(flags);
5608 case HWRM_RING_ALLOC_AGG:
5609 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5610 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5611 /* Association of agg ring with rx ring */
5612 grp_info = &bp->grp_info[ring->grp_idx];
5613 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5614 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5615 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5616 req.enables |= cpu_to_le32(
5617 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5618 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5620 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5622 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5624 case HWRM_RING_ALLOC_CMPL:
5625 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5626 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5627 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5628 /* Association of cp ring with nq */
5629 grp_info = &bp->grp_info[map_index];
5630 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5631 req.cq_handle = cpu_to_le64(ring->handle);
5632 req.enables |= cpu_to_le32(
5633 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5634 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5635 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5638 case HWRM_RING_ALLOC_NQ:
5639 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5640 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5641 if (bp->flags & BNXT_FLAG_USING_MSIX)
5642 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5645 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5650 mutex_lock(&bp->hwrm_cmd_lock);
5651 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5652 err = le16_to_cpu(resp->error_code);
5653 ring_id = le16_to_cpu(resp->ring_id);
5654 mutex_unlock(&bp->hwrm_cmd_lock);
5657 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5658 ring_type, rc, err);
5661 ring->fw_ring_id = ring_id;
5665 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5670 struct hwrm_func_cfg_input req = {0};
5672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5673 req.fid = cpu_to_le16(0xffff);
5674 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5675 req.async_event_cr = cpu_to_le16(idx);
5676 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5678 struct hwrm_func_vf_cfg_input req = {0};
5680 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5682 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5683 req.async_event_cr = cpu_to_le16(idx);
5684 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5689 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5690 u32 map_idx, u32 xid)
5692 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5694 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5696 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5697 switch (ring_type) {
5698 case HWRM_RING_ALLOC_TX:
5699 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5701 case HWRM_RING_ALLOC_RX:
5702 case HWRM_RING_ALLOC_AGG:
5703 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5705 case HWRM_RING_ALLOC_CMPL:
5706 db->db_key64 = DBR_PATH_L2;
5708 case HWRM_RING_ALLOC_NQ:
5709 db->db_key64 = DBR_PATH_L2;
5712 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5714 db->doorbell = bp->bar1 + map_idx * 0x80;
5715 switch (ring_type) {
5716 case HWRM_RING_ALLOC_TX:
5717 db->db_key32 = DB_KEY_TX;
5719 case HWRM_RING_ALLOC_RX:
5720 case HWRM_RING_ALLOC_AGG:
5721 db->db_key32 = DB_KEY_RX;
5723 case HWRM_RING_ALLOC_CMPL:
5724 db->db_key32 = DB_KEY_CP;
5730 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5732 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5736 if (bp->flags & BNXT_FLAG_CHIP_P5)
5737 type = HWRM_RING_ALLOC_NQ;
5739 type = HWRM_RING_ALLOC_CMPL;
5740 for (i = 0; i < bp->cp_nr_rings; i++) {
5741 struct bnxt_napi *bnapi = bp->bnapi[i];
5742 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5743 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5744 u32 map_idx = ring->map_idx;
5745 unsigned int vector;
5747 vector = bp->irq_tbl[map_idx].vector;
5748 disable_irq_nosync(vector);
5749 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5754 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5755 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5757 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5760 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5762 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5766 type = HWRM_RING_ALLOC_TX;
5767 for (i = 0; i < bp->tx_nr_rings; i++) {
5768 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5769 struct bnxt_ring_struct *ring;
5772 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5773 struct bnxt_napi *bnapi = txr->bnapi;
5774 struct bnxt_cp_ring_info *cpr, *cpr2;
5775 u32 type2 = HWRM_RING_ALLOC_CMPL;
5777 cpr = &bnapi->cp_ring;
5778 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5779 ring = &cpr2->cp_ring_struct;
5780 ring->handle = BNXT_TX_HDL;
5781 map_idx = bnapi->index;
5782 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5785 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5787 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5789 ring = &txr->tx_ring_struct;
5791 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5794 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5797 type = HWRM_RING_ALLOC_RX;
5798 for (i = 0; i < bp->rx_nr_rings; i++) {
5799 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5800 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5801 struct bnxt_napi *bnapi = rxr->bnapi;
5802 u32 map_idx = bnapi->index;
5804 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5807 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5808 /* If we have agg rings, post agg buffers first. */
5810 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5811 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5812 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5814 u32 type2 = HWRM_RING_ALLOC_CMPL;
5815 struct bnxt_cp_ring_info *cpr2;
5817 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5818 ring = &cpr2->cp_ring_struct;
5819 ring->handle = BNXT_RX_HDL;
5820 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5823 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5825 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5830 type = HWRM_RING_ALLOC_AGG;
5831 for (i = 0; i < bp->rx_nr_rings; i++) {
5832 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5833 struct bnxt_ring_struct *ring =
5834 &rxr->rx_agg_ring_struct;
5835 u32 grp_idx = ring->grp_idx;
5836 u32 map_idx = grp_idx + bp->rx_nr_rings;
5838 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5842 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5844 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5845 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5846 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5853 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5854 struct bnxt_ring_struct *ring,
5855 u32 ring_type, int cmpl_ring_id)
5858 struct hwrm_ring_free_input req = {0};
5859 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5862 if (BNXT_NO_FW_ACCESS(bp))
5865 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5866 req.ring_type = ring_type;
5867 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5869 mutex_lock(&bp->hwrm_cmd_lock);
5870 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5871 error_code = le16_to_cpu(resp->error_code);
5872 mutex_unlock(&bp->hwrm_cmd_lock);
5874 if (rc || error_code) {
5875 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5876 ring_type, rc, error_code);
5882 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5890 for (i = 0; i < bp->tx_nr_rings; i++) {
5891 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5892 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5894 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5895 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5897 hwrm_ring_free_send_msg(bp, ring,
5898 RING_FREE_REQ_RING_TYPE_TX,
5899 close_path ? cmpl_ring_id :
5900 INVALID_HW_RING_ID);
5901 ring->fw_ring_id = INVALID_HW_RING_ID;
5905 for (i = 0; i < bp->rx_nr_rings; i++) {
5906 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5907 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5908 u32 grp_idx = rxr->bnapi->index;
5910 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5911 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5913 hwrm_ring_free_send_msg(bp, ring,
5914 RING_FREE_REQ_RING_TYPE_RX,
5915 close_path ? cmpl_ring_id :
5916 INVALID_HW_RING_ID);
5917 ring->fw_ring_id = INVALID_HW_RING_ID;
5918 bp->grp_info[grp_idx].rx_fw_ring_id =
5923 if (bp->flags & BNXT_FLAG_CHIP_P5)
5924 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5926 type = RING_FREE_REQ_RING_TYPE_RX;
5927 for (i = 0; i < bp->rx_nr_rings; i++) {
5928 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5929 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5930 u32 grp_idx = rxr->bnapi->index;
5932 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5933 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5935 hwrm_ring_free_send_msg(bp, ring, type,
5936 close_path ? cmpl_ring_id :
5937 INVALID_HW_RING_ID);
5938 ring->fw_ring_id = INVALID_HW_RING_ID;
5939 bp->grp_info[grp_idx].agg_fw_ring_id =
5944 /* The completion rings are about to be freed. After that the
5945 * IRQ doorbell will not work anymore. So we need to disable
5948 bnxt_disable_int_sync(bp);
5950 if (bp->flags & BNXT_FLAG_CHIP_P5)
5951 type = RING_FREE_REQ_RING_TYPE_NQ;
5953 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5954 for (i = 0; i < bp->cp_nr_rings; i++) {
5955 struct bnxt_napi *bnapi = bp->bnapi[i];
5956 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5957 struct bnxt_ring_struct *ring;
5960 for (j = 0; j < 2; j++) {
5961 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5964 ring = &cpr2->cp_ring_struct;
5965 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5967 hwrm_ring_free_send_msg(bp, ring,
5968 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5969 INVALID_HW_RING_ID);
5970 ring->fw_ring_id = INVALID_HW_RING_ID;
5973 ring = &cpr->cp_ring_struct;
5974 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5975 hwrm_ring_free_send_msg(bp, ring, type,
5976 INVALID_HW_RING_ID);
5977 ring->fw_ring_id = INVALID_HW_RING_ID;
5978 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5983 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5986 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5988 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5989 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5990 struct hwrm_func_qcfg_input req = {0};
5993 if (bp->hwrm_spec_code < 0x10601)
5996 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5997 req.fid = cpu_to_le16(0xffff);
5998 mutex_lock(&bp->hwrm_cmd_lock);
5999 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6001 mutex_unlock(&bp->hwrm_cmd_lock);
6005 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6006 if (BNXT_NEW_RM(bp)) {
6009 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6010 hw_resc->resv_hw_ring_grps =
6011 le32_to_cpu(resp->alloc_hw_ring_grps);
6012 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6013 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6014 stats = le16_to_cpu(resp->alloc_stat_ctx);
6015 hw_resc->resv_irqs = cp;
6016 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6017 int rx = hw_resc->resv_rx_rings;
6018 int tx = hw_resc->resv_tx_rings;
6020 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6022 if (cp < (rx + tx)) {
6023 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6024 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6026 hw_resc->resv_rx_rings = rx;
6027 hw_resc->resv_tx_rings = tx;
6029 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6030 hw_resc->resv_hw_ring_grps = rx;
6032 hw_resc->resv_cp_rings = cp;
6033 hw_resc->resv_stat_ctxs = stats;
6035 mutex_unlock(&bp->hwrm_cmd_lock);
6039 /* Caller must hold bp->hwrm_cmd_lock */
6040 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6042 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6043 struct hwrm_func_qcfg_input req = {0};
6046 if (bp->hwrm_spec_code < 0x10601)
6049 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6050 req.fid = cpu_to_le16(fid);
6051 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6053 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6058 static bool bnxt_rfs_supported(struct bnxt *bp);
6061 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6062 int tx_rings, int rx_rings, int ring_grps,
6063 int cp_rings, int stats, int vnics)
6067 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6068 req->fid = cpu_to_le16(0xffff);
6069 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6070 req->num_tx_rings = cpu_to_le16(tx_rings);
6071 if (BNXT_NEW_RM(bp)) {
6072 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6073 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6074 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6075 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6076 enables |= tx_rings + ring_grps ?
6077 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6078 enables |= rx_rings ?
6079 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6081 enables |= cp_rings ?
6082 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6083 enables |= ring_grps ?
6084 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6085 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6087 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6089 req->num_rx_rings = cpu_to_le16(rx_rings);
6090 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6091 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6092 req->num_msix = cpu_to_le16(cp_rings);
6093 req->num_rsscos_ctxs =
6094 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6096 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6097 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6098 req->num_rsscos_ctxs = cpu_to_le16(1);
6099 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6100 bnxt_rfs_supported(bp))
6101 req->num_rsscos_ctxs =
6102 cpu_to_le16(ring_grps + 1);
6104 req->num_stat_ctxs = cpu_to_le16(stats);
6105 req->num_vnics = cpu_to_le16(vnics);
6107 req->enables = cpu_to_le32(enables);
6111 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6112 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6113 int rx_rings, int ring_grps, int cp_rings,
6114 int stats, int vnics)
6118 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6119 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6120 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6121 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6122 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6123 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6124 enables |= tx_rings + ring_grps ?
6125 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6127 enables |= cp_rings ?
6128 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6129 enables |= ring_grps ?
6130 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6132 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6133 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6135 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6136 req->num_tx_rings = cpu_to_le16(tx_rings);
6137 req->num_rx_rings = cpu_to_le16(rx_rings);
6138 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6139 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6140 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6142 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6143 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6144 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6146 req->num_stat_ctxs = cpu_to_le16(stats);
6147 req->num_vnics = cpu_to_le16(vnics);
6149 req->enables = cpu_to_le32(enables);
6153 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6154 int ring_grps, int cp_rings, int stats, int vnics)
6156 struct hwrm_func_cfg_input req = {0};
6159 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6160 cp_rings, stats, vnics);
6164 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6168 if (bp->hwrm_spec_code < 0x10601)
6169 bp->hw_resc.resv_tx_rings = tx_rings;
6171 return bnxt_hwrm_get_rings(bp);
6175 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6176 int ring_grps, int cp_rings, int stats, int vnics)
6178 struct hwrm_func_vf_cfg_input req = {0};
6181 if (!BNXT_NEW_RM(bp)) {
6182 bp->hw_resc.resv_tx_rings = tx_rings;
6186 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6187 cp_rings, stats, vnics);
6188 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6192 return bnxt_hwrm_get_rings(bp);
6195 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6196 int cp, int stat, int vnic)
6199 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6202 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6206 int bnxt_nq_rings_in_use(struct bnxt *bp)
6208 int cp = bp->cp_nr_rings;
6209 int ulp_msix, ulp_base;
6211 ulp_msix = bnxt_get_ulp_msix_num(bp);
6213 ulp_base = bnxt_get_ulp_msix_base(bp);
6215 if ((ulp_base + ulp_msix) > cp)
6216 cp = ulp_base + ulp_msix;
6221 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6225 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6226 return bnxt_nq_rings_in_use(bp);
6228 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6232 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6234 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6235 int cp = bp->cp_nr_rings;
6240 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6241 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6243 return cp + ulp_stat;
6246 /* Check if a default RSS map needs to be setup. This function is only
6247 * used on older firmware that does not require reserving RX rings.
6249 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6251 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6253 /* The RSS map is valid for RX rings set to resv_rx_rings */
6254 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6255 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6256 if (!netif_is_rxfh_configured(bp->dev))
6257 bnxt_set_dflt_rss_indir_tbl(bp);
6261 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6263 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6264 int cp = bnxt_cp_rings_in_use(bp);
6265 int nq = bnxt_nq_rings_in_use(bp);
6266 int rx = bp->rx_nr_rings, stat;
6267 int vnic = 1, grp = rx;
6269 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6270 bp->hwrm_spec_code >= 0x10601)
6273 /* Old firmware does not need RX ring reservations but we still
6274 * need to setup a default RSS map when needed. With new firmware
6275 * we go through RX ring reservations first and then set up the
6276 * RSS map for the successfully reserved RX rings when needed.
6278 if (!BNXT_NEW_RM(bp)) {
6279 bnxt_check_rss_tbl_no_rmgr(bp);
6282 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6284 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6286 stat = bnxt_get_func_stat_ctxs(bp);
6287 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6288 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6289 (hw_resc->resv_hw_ring_grps != grp &&
6290 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6292 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6293 hw_resc->resv_irqs != nq)
6298 static int __bnxt_reserve_rings(struct bnxt *bp)
6300 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6301 int cp = bnxt_nq_rings_in_use(bp);
6302 int tx = bp->tx_nr_rings;
6303 int rx = bp->rx_nr_rings;
6304 int grp, rx_rings, rc;
6308 if (!bnxt_need_reserve_rings(bp))
6311 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6313 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6315 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6317 grp = bp->rx_nr_rings;
6318 stat = bnxt_get_func_stat_ctxs(bp);
6320 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6324 tx = hw_resc->resv_tx_rings;
6325 if (BNXT_NEW_RM(bp)) {
6326 rx = hw_resc->resv_rx_rings;
6327 cp = hw_resc->resv_irqs;
6328 grp = hw_resc->resv_hw_ring_grps;
6329 vnic = hw_resc->resv_vnics;
6330 stat = hw_resc->resv_stat_ctxs;
6334 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6338 if (netif_running(bp->dev))
6341 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6342 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6343 bp->dev->hw_features &= ~NETIF_F_LRO;
6344 bp->dev->features &= ~NETIF_F_LRO;
6345 bnxt_set_ring_params(bp);
6348 rx_rings = min_t(int, rx_rings, grp);
6349 cp = min_t(int, cp, bp->cp_nr_rings);
6350 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6351 stat -= bnxt_get_ulp_stat_ctxs(bp);
6352 cp = min_t(int, cp, stat);
6353 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6354 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6356 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6357 bp->tx_nr_rings = tx;
6359 /* If we cannot reserve all the RX rings, reset the RSS map only
6360 * if absolutely necessary
6362 if (rx_rings != bp->rx_nr_rings) {
6363 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6364 rx_rings, bp->rx_nr_rings);
6365 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6366 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6367 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6368 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6369 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6370 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6373 bp->rx_nr_rings = rx_rings;
6374 bp->cp_nr_rings = cp;
6376 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6379 if (!netif_is_rxfh_configured(bp->dev))
6380 bnxt_set_dflt_rss_indir_tbl(bp);
6385 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6386 int ring_grps, int cp_rings, int stats,
6389 struct hwrm_func_vf_cfg_input req = {0};
6392 if (!BNXT_NEW_RM(bp))
6395 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6396 cp_rings, stats, vnics);
6397 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6398 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6399 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6400 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6401 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6402 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6403 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6404 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6406 req.flags = cpu_to_le32(flags);
6407 return hwrm_send_message_silent(bp, &req, sizeof(req),
6411 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6412 int ring_grps, int cp_rings, int stats,
6415 struct hwrm_func_cfg_input req = {0};
6418 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6419 cp_rings, stats, vnics);
6420 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6421 if (BNXT_NEW_RM(bp)) {
6422 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6423 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6424 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6425 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6426 if (bp->flags & BNXT_FLAG_CHIP_P5)
6427 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6428 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6430 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6433 req.flags = cpu_to_le32(flags);
6434 return hwrm_send_message_silent(bp, &req, sizeof(req),
6438 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6439 int ring_grps, int cp_rings, int stats,
6442 if (bp->hwrm_spec_code < 0x10801)
6446 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6447 ring_grps, cp_rings, stats,
6450 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6451 cp_rings, stats, vnics);
6454 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6456 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6457 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6458 struct hwrm_ring_aggint_qcaps_input req = {0};
6461 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6462 coal_cap->num_cmpl_dma_aggr_max = 63;
6463 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6464 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6465 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6466 coal_cap->int_lat_tmr_min_max = 65535;
6467 coal_cap->int_lat_tmr_max_max = 65535;
6468 coal_cap->num_cmpl_aggr_int_max = 65535;
6469 coal_cap->timer_units = 80;
6471 if (bp->hwrm_spec_code < 0x10902)
6474 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6475 mutex_lock(&bp->hwrm_cmd_lock);
6476 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6478 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6479 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6480 coal_cap->num_cmpl_dma_aggr_max =
6481 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6482 coal_cap->num_cmpl_dma_aggr_during_int_max =
6483 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6484 coal_cap->cmpl_aggr_dma_tmr_max =
6485 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6486 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6487 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6488 coal_cap->int_lat_tmr_min_max =
6489 le16_to_cpu(resp->int_lat_tmr_min_max);
6490 coal_cap->int_lat_tmr_max_max =
6491 le16_to_cpu(resp->int_lat_tmr_max_max);
6492 coal_cap->num_cmpl_aggr_int_max =
6493 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6494 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6496 mutex_unlock(&bp->hwrm_cmd_lock);
6499 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6501 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6503 return usec * 1000 / coal_cap->timer_units;
6506 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6507 struct bnxt_coal *hw_coal,
6508 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6510 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6511 u32 cmpl_params = coal_cap->cmpl_params;
6512 u16 val, tmr, max, flags = 0;
6514 max = hw_coal->bufs_per_record * 128;
6515 if (hw_coal->budget)
6516 max = hw_coal->bufs_per_record * hw_coal->budget;
6517 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6519 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6520 req->num_cmpl_aggr_int = cpu_to_le16(val);
6522 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6523 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6525 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6526 coal_cap->num_cmpl_dma_aggr_during_int_max);
6527 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6529 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6530 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6531 req->int_lat_tmr_max = cpu_to_le16(tmr);
6533 /* min timer set to 1/2 of interrupt timer */
6534 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6536 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6537 req->int_lat_tmr_min = cpu_to_le16(val);
6538 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6541 /* buf timer set to 1/4 of interrupt timer */
6542 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6543 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6546 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6547 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6548 val = clamp_t(u16, tmr, 1,
6549 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6550 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6552 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6555 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6556 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6557 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6558 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6559 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6560 req->flags = cpu_to_le16(flags);
6561 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6564 /* Caller holds bp->hwrm_cmd_lock */
6565 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6566 struct bnxt_coal *hw_coal)
6568 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6569 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6570 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6571 u32 nq_params = coal_cap->nq_params;
6574 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6577 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6579 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6581 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6583 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6584 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6585 req.int_lat_tmr_min = cpu_to_le16(tmr);
6586 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6587 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6590 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6592 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6593 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6594 struct bnxt_coal coal;
6596 /* Tick values in micro seconds.
6597 * 1 coal_buf x bufs_per_record = 1 completion record.
6599 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6601 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6602 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6604 if (!bnapi->rx_ring)
6607 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6608 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6610 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6612 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6614 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;
		u16 ring_id;

		req = &req_rx;
		if (!bnapi->rx_ring) {
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req = &req_tx;
		} else {
			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
		}
		req->ring_id = cpu_to_le16(ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring) {
			req = &req_tx;
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req->ring_id = cpu_to_le16(ring_id);
			rc = _hwrm_send_message(bp, req, sizeof(*req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
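
/* Free all firmware statistics contexts.  Firmware with major version
 * 20 or older does not clear the counters on free, so an explicit
 * clear-stats command is sent first for those versions.
 */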
static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	struct hwrm_stat_ctx_clr_stats_input req0 = {0};
	struct hwrm_stat_ctx_free_input req = {0};
	int i;

	if (!bp->bnapi)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
			if (BNXT_FW_MAJ(bp) <= 20) {
				req0.stat_ctx_id = req.stat_ctx_id;
				_hwrm_send_message(bp, &req0, sizeof(req0),
						   HWRM_CMD_TIMEOUT);
			}
			_hwrm_send_message(bp, &req, sizeof(req),
					   HWRM_CMD_TIMEOUT);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u32 min_db_offset = 0;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;
	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

	if (bp->db_size)
		goto func_qcfg_exit;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			min_db_offset = DB_PF_OFFSET_P5;
		else
			min_db_offset = DB_VF_OFFSET_P5;
	}
	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
				 1024);
	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
	    bp->db_size <= min_db_offset)
		bp->db_size = pci_resource_len(bp->pdev, 2);

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
			  struct hwrm_func_backing_store_qcaps_output *resp)
{
	struct bnxt_mem_init *mem_init;
	u16 init_mask;
	u8 init_val;
	u8 *offset;
	int i;

	init_val = resp->ctx_kind_initializer;
	init_mask = le16_to_cpu(resp->ctx_init_mask);
	offset = &resp->qp_init_offset;
	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
		mem_init->init_val = init_val;
		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
		if (!init_mask)
			continue;
		if (i == BNXT_CTX_MEM_INIT_STAT)
			offset = &resp->stat_init_offset;
		if (init_mask & (1 << i))
			mem_init->offset = *offset * 4;
		else
			mem_init->init_val = 0;
	}
	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
}
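
/* Query the firmware for the sizing limits and entry sizes of each
 * context memory (backing store) region and cache them in bp->ctx.
 */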
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct bnxt_ctx_pg_info *ctx_pg;
		struct bnxt_ctx_mem_info *ctx;
		int tqm_rings, i;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto ctx_err;
		}
		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
		ctx->vnic_max_vnic_entries =
			le16_to_cpu(resp->vnic_max_vnic_entries);
		ctx->vnic_max_ring_table_entries =
			le16_to_cpu(resp->vnic_max_ring_table_entries);
		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
		ctx->tqm_min_entries_per_ring =
			le32_to_cpu(resp->tqm_min_entries_per_ring);
		ctx->tqm_max_entries_per_ring =
			le32_to_cpu(resp->tqm_max_entries_per_ring);
		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
		if (!ctx->tqm_entries_multiple)
			ctx->tqm_entries_multiple = 1;
		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
		ctx->mrav_num_entries_units =
			le16_to_cpu(resp->mrav_num_entries_units);
		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);

		bnxt_init_ctx_initializer(ctx, resp);

		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
		if (!ctx->tqm_fp_rings_count)
			ctx->tqm_fp_rings_count = bp->max_q;
		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;

		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
		if (!ctx_pg) {
			kfree(ctx);
			rc = -ENOMEM;
			goto ctx_err;
		}
		for (i = 0; i < tqm_rings; i++, ctx_pg++)
			ctx->tqm_mem[i] = ctx_pg;
		bp->ctx = ctx;
	} else {
		rc = 0;
	}
ctx_err:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	if (!rmem->nr_pages)
		return;

	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}
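
/* The backing store regions that must always be configured, plus the
 * HWRM command that conveys each region's page level, page size and
 * page directory address to the firmware.
 */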
#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)

static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
{
	struct hwrm_func_backing_store_cfg_input req = {0};
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	u32 req_len = sizeof(req);
	__le32 *num_entries;
	__le64 *pg_dir;
	u32 flags = 0;
	u8 *pg_attr;
	u32 ena;
	int i;

	if (!ctx)
		return 0;

	if (req_len > bp->hwrm_max_ext_req_len)
		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
	req.enables = cpu_to_le32(enables);

	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.qpc_pg_size_qpc_lvl,
				      &req.qpc_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.srq_pg_size_srq_lvl,
				      &req.srq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
				      &req.cq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req.vnic_num_vnic_entries =
			cpu_to_le16(ctx->vnic_max_vnic_entries);
		req.vnic_num_ring_table_entries =
			cpu_to_le16(ctx->vnic_max_ring_table_entries);
		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.vnic_pg_size_vnic_lvl,
				      &req.vnic_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.stat_pg_size_stat_lvl,
				      &req.stat_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
		ctx_pg = &ctx->mrav_mem;
		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
		if (ctx->mrav_num_entries_units)
			flags |=
			FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
		req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.mrav_pg_size_mrav_lvl,
				      &req.mrav_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
		ctx_pg = &ctx->tim_mem;
		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
		req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.tim_pg_size_tim_lvl,
				      &req.tim_page_dir);
	}
	for (i = 0, num_entries = &req.tqm_sp_num_entries,
	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
	     pg_dir = &req.tqm_sp_page_dir,
	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
	     i < BNXT_MAX_TQM_RINGS;
	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
		ctx_pg = ctx->tqm_mem[i];
		*num_entries = cpu_to_le32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}
	req.flags = cpu_to_le32(flags);
	return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_pg_arr;
	rmem->dma_arr = ctx_pg->ctx_dma_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	if (rmem->depth >= 1)
		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
	return bnxt_alloc_ring(bp, rmem);
}
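
/* Allocate host pages for one backing store region.  Regions needing
 * more than MAX_CTX_PAGES (or depth > 1) use a two-level structure with
 * an indirect page table per block.
 */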
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_mem_init *mem_init)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->mem_init = mem_init;
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->mem_init = mem_init;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
	}
	return rc;
}
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(bp, rmem2);
			ctx_pg->ctx_pg_arr[i] = NULL;
			kfree(pg_tbl);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(bp, rmem);
	ctx_pg->nr_pages = 0;
}
static void bnxt_free_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	int i;

	if (!ctx)
		return;

	if (ctx->tqm_mem[0]) {
		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
		kfree(ctx->tqm_mem[0]);
		ctx->tqm_mem[0] = NULL;
	}

	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}
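
/* Size and allocate all backing store regions from the queried limits,
 * then configure them in the firmware.  RoCE-capable devices reserve
 * extra QP/SRQ/MR/AH entries unless running in a kdump kernel.
 */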
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	struct bnxt_mem_init *init;
	u32 mem_size, ena, entries;
	u32 entries_sp, min;
	u32 num_mr, num_ah;
	u32 extra_srqs = 0;
	u32 extra_qps = 0;
	u8 pg_lvl = 1;
	int i, rc;

	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx = bp->ctx;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
		pg_lvl = 2;
		extra_qps = 65536;
		extra_srqs = 8192;
	}

	ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
			  extra_qps;
	if (ctx->qp_entry_size) {
		mem_size = ctx->qp_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->srq_mem;
	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
	if (ctx->srq_entry_size) {
		mem_size = ctx->srq_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->cq_mem;
	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
	if (ctx->cq_entry_size) {
		mem_size = ctx->cq_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->vnic_mem;
	ctx_pg->entries = ctx->vnic_max_vnic_entries +
			  ctx->vnic_max_ring_table_entries;
	if (ctx->vnic_entry_size) {
		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->stat_mem;
	ctx_pg->entries = ctx->stat_max_entries;
	if (ctx->stat_entry_size) {
		mem_size = ctx->stat_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
		if (rc)
			return rc;
	}

	ena = 0;
	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctx_pg = &ctx->mrav_mem;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = 1024 * 256;
	num_ah = 1024 * 128;
	ctx_pg->entries = num_mr + num_ah;
	if (ctx->mrav_entry_size) {
		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
		if (rc)
			return rc;
	}
	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
	if (ctx->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctx->mrav_num_entries_units) << 16) |
			 (num_ah / ctx->mrav_num_entries_units);

	ctx_pg = &ctx->tim_mem;
	ctx_pg->entries = ctx->qp_mem.entries;
	if (ctx->tim_entry_size) {
		mem_size = ctx->tim_entry_size * ctx_pg->entries;
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
		if (rc)
			return rc;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;

skip_rdma:
	min = ctx->tqm_min_entries_per_ring;
	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = i ? entries : entries_sp;
		if (ctx->tqm_entry_size) {
			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
						    NULL);
			if (rc)
				return rc;
		}
		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
	if (rc) {
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;
	return 0;
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
				       HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	if (!all)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		u16 max_msix = le16_to_cpu(resp->max_msix);

		hw_resc->max_nqs = max_msix;
		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
	}

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u32 flags, flags_ext;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;

	flags_ext = le32_to_cpu(resp->flags_ext);
	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;

	bp->tx_push_thresh = 0;
	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
	    BNXT_FW_MAJ(bp) > 217)
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
	if (!hw_resc->max_hw_ring_grps)
		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		bp->flags &= ~BNXT_FLAG_WOL_CAP;
		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
			bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
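
/* Top-level capability discovery: function capabilities, queue
 * configuration, context memory, and (on firmware with the new resource
 * manager) per-function resource limits.
 */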
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);

static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
		return rc;
	}
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc)
			return rc;
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}
static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
	u32 flags;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
		return 0;

	resp = bp->hwrm_cmd_resp_addr;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_cfa_adv_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;

hwrm_cfa_adv_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int __bnxt_alloc_fw_health(struct bnxt *bp)
{
	if (bp->fw_health)
		return 0;

	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
	if (!bp->fw_health)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_fw_health(struct bnxt *bp)
{
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	rc = __bnxt_alloc_fw_health(bp);
	if (rc) {
		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
		return rc;
	}

	return 0;
}
static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
{
	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
					 BNXT_FW_HEALTH_WIN_MAP_OFF);
}

bool bnxt_is_fw_healthy(struct bnxt *bp)
{
	if (bp->fw_health && bp->fw_health->status_reliable) {
		u32 fw_status;

		fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
			return false;
	}

	return true;
}

static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_type;

	if (!fw_health || !fw_health->status_reliable)
		return;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
		fw_health->status_reliable = false;
}
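
/* Locate the firmware health status register, either through the hcomm
 * status structure or, on P5 chips, at the fixed GRC location, and map
 * it through a BAR0 window.
 */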
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
{
	void __iomem *hs;
	u32 status_loc;
	u32 reg_type;
	u32 sig;

	if (bp->fw_health)
		bp->fw_health->status_reliable = false;

	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);

	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
		if (!bp->chip_num) {
			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
			bp->chip_num = readl(bp->bar0 +
					     BNXT_FW_HEALTH_WIN_BASE +
					     BNXT_GRC_REG_CHIP_NUM);
		}
		if (!BNXT_CHIP_P5(bp))
			return;

		status_loc = BNXT_GRC_REG_STATUS_P5 |
			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
	} else {
		status_loc = readl(hs + offsetof(struct hcomm_status,
						 fw_status_loc));
	}

	if (__bnxt_alloc_fw_health(bp)) {
		netdev_warn(bp->dev, "no memory for firmware status checks\n");
		return;
	}

	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
		__bnxt_map_fw_health_reg(bp, status_loc);
		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
			BNXT_FW_HEALTH_WIN_OFF(status_loc);
	}

	bp->fw_health->status_reliable = true;
}
static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	bp->fw_health->status_reliable = false;
	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	bp->fw_health->status_reliable = true;
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32_to_cpu(resp->flags);
	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32_to_cpu(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32_to_cpu(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32_to_cpu(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32_to_cpu(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32_to_cpu(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32_to_cpu(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32_to_cpu(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32_to_cpu(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32_to_cpu(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			resp->delay_after_reset[i];
	}
err_recovery_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
{
	struct hwrm_nvm_get_dev_info_output nvm_info;

	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
			 nvm_info.nvm_cfg_ver_upd);
}
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, j, *qptr;
	bool no_rdma;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bp->max_tc; i++) {
		bp->q_info[j].queue_id = *qptr;
		bp->q_ids[i] = *qptr++;
		bp->q_info[j].queue_profile = *qptr++;
		bp->tc_to_qidx[j] = j;
		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
		    (no_rdma && BNXT_PF(bp)))
			j++;
	}
	bp->max_q = bp->max_tc;
	bp->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
{
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
				   silent);
	return rc;
}
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	u16 fw_maj, fw_min, fw_bld, fw_rsv;
	u32 dev_caps_cfg, hwrm_ver;
	int rc, len;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = __bnxt_hwrm_ver_get(bp, false);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			     resp->hwrm_intf_min_8b << 8 |
			     resp->hwrm_intf_upd_8b;
	if (resp->hwrm_intf_maj_8b < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			    resp->hwrm_intf_upd_8b);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}

	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
			HWRM_VERSION_UPDATE;

	if (bp->hwrm_spec_code > hwrm_ver)
		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
			 HWRM_VERSION_UPDATE);
	else
		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			 resp->hwrm_intf_upd_8b);

	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
		len = FW_VER_STR_LEN;
	} else {
		fw_maj = resp->hwrm_fw_maj_8b;
		fw_min = resp->hwrm_fw_min_8b;
		fw_bld = resp->hwrm_fw_bld_8b;
		fw_rsv = resp->hwrm_fw_rsvd_8b;
		len = BC_HWRM_STR_LEN;
	}
	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
		 fw_rsv);

	if (strlen(resp->active_pkg_name)) {
		int fw_ver_len = strlen(bp->fw_ver_str);

		snprintf(bp->fw_ver_str + fw_ver_len,
			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
			 resp->active_pkg_name);
		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
	}

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1) {
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
	}
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	bp->chip_num = le16_to_cpu(resp->chip_num);
	bp->chip_rev = resp->chip_rev;
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;

	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
	struct hwrm_fw_set_time_input req = {0};
	struct tm tm;
	time64_t now = ktime_get_real_seconds();

	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
	    bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	time64_to_tm(now, 0, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
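
/* Accumulate a hardware counter that may be narrower than 64 bits into
 * a 64-bit software counter; the mask gives the hardware counter width
 * so that wrap-around can be detected and compensated.
 */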
static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
	u64 sw_tmp;

	hw &= mask;
	sw_tmp = (*sw & ~mask) | hw;
	if (hw < (*sw & mask))
		sw_tmp += mask + 1;
	WRITE_ONCE(*sw, sw_tmp);
}

static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
				    int count, bool ignore_zero)
{
	int i;

	for (i = 0; i < count; i++) {
		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));

		if (ignore_zero && !hw)
			continue;

		if (masks[i] == -1ULL)
			sw_stats[i] = hw;
		else
			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
	}
}

static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
{
	if (!stats->hw_stats)
		return;

	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
				stats->hw_masks, stats->len / 8, false);
}
static void bnxt_accumulate_all_stats(struct bnxt *bp)
{
	struct bnxt_stats_mem *ring0_stats;
	bool ignore_zero = false;
	int i;

	/* Chip bug.  Counter intermittently becomes 0. */
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		ignore_zero = true;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_stats_mem *stats;

		cpr = &bnapi->cp_ring;
		stats = &cpr->stats;
		if (!i)
			ring0_stats = stats;
		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
					ring0_stats->hw_masks,
					ring0_stats->len / 8, ignore_zero);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		struct bnxt_stats_mem *stats = &bp->port_stats;
		__le64 *hw_stats = stats->hw_stats;
		u64 *sw_stats = stats->sw_stats;
		u64 *masks = stats->hw_masks;
		int cnt;

		cnt = sizeof(struct rx_port_stats) / 8;
		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);

		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		cnt = sizeof(struct tx_port_stats) / 8;
		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
	}
}
static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
{
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
		return -EOPNOTSUPP;

	req.flags = flags;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
	req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
{
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
	struct hwrm_port_qstats_ext_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	u32 tx_stat_size;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return 0;

	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
		return -EOPNOTSUPP;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
	req.flags = flags;
	req.port_id = cpu_to_le16(pf->port_id);
	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
	req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
		       sizeof(struct tx_port_stats_ext) : 0;
	req.tx_stat_size = cpu_to_le16(tx_stat_size);
	req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
		bp->fw_tx_stats_ext_size = tx_stat_size ?
			le16_to_cpu(resp->tx_stat_size) / 8 : 0;
	} else {
		bp->fw_rx_stats_ext_size = 0;
		bp->fw_tx_stats_ext_size = 0;
	}
	if (flags)
		goto qstats_done;

	if (bp->fw_tx_stats_ext_size <=
	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		bp->pri2cos_valid = 0;
		return rc;
	}

	bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct hwrm_queue_pri2cos_qcfg_output *resp2;
		u8 *pri2cos;
		int i, j;

		resp2 = bp->hwrm_cmd_resp_addr;
		pri2cos = &resp2->pri0_cos_queue_id;
		for (i = 0; i < 8; i++) {
			u8 queue_id = pri2cos[i];
			u8 queue_idx;

			/* Per port queue IDs start from 0, 10, 20, etc */
			queue_idx = queue_id % 10;
			if (queue_idx > BNXT_MAX_QUEUE) {
				bp->pri2cos_valid = false;
				goto qstats_done;
			}
			for (j = 0; j < bp->max_q; j++) {
				if (bp->q_ids[j] == queue_id)
					bp->pri2cos_idx[i] = queue_idx;
			}
		}
		bp->pri2cos_valid = 1;
	}
qstats_done:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	else if (BNXT_NO_FW_ACCESS(bp))
		return 0;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
{
	if (!bp->vnic_info)
		return;

	bnxt_hwrm_clear_vnic_filter(bp);
	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
	}
	/* undo the vnic tpa settings before freeing the vnic */
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_set_tpa(bp, false);
	bnxt_hwrm_vnic_free(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_hwrm_vnic_ctx_free(bp);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	bnxt_clear_vnic(bp);
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
	struct hwrm_func_cfg_input req = {0};

	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
	if (size == 128)
		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
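
/* Set up a VNIC: allocate its RSS contexts, configure the default ring
 * group, enable RSS hashing, and set header-data split when aggregation
 * rings are used.
 */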
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}
static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
	int rc, i, nr_ctxs;

	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
				   vnic_id, i, rc);
			break;
		}
		bp->rsscos_nr_ctxs++;
	}
	if (i < nr_ctxs)
		return -ENOMEM;

	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic_id, rc);
		return rc;
	}
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		return rc;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}
	return rc;
}

static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return __bnxt_setup_vnic_p5(bp, vnic_id);
	else
		return __bnxt_setup_vnic(bp, vnic_id);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
		return false;
#endif
	return true;
}
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	unsigned int rc = 0;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}
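
/* Bring up the data path: allocate statistics contexts, rings, ring
 * groups and VNICs, then program the MAC filter, RX mask, TPA and
 * interrupt coalescing.
 */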
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);

static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = 0;
	if (bp->dev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if (bp->dev->flags & IFF_PROMISC)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
				rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	if (BNXT_VF(bp)) {
		bnxt_hwrm_func_qcfg(bp);
		netdev_update_features(bp->dev);
	}

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}
static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_cp_rings(bp);
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
	int rc;
	struct net_device *dev = bp->dev;

	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
					  bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

	return rc;
}
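
/* Fit the requested RX and TX ring counts into max total rings.  With
 * shared completion rings each count is simply capped at max; otherwise
 * RX and TX are trimmed alternately until the sum fits.
 */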
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}
static void bnxt_setup_msix(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);
	struct net_device *dev = bp->dev;
	int tcs, i;

	tcs = netdev_get_num_tc(dev);
	if (tcs) {
		int i, off, count;

		for (i = 0; i < tcs; i++) {
			count = bp->tx_nr_rings_per_tc;
			off = i * count;
			netdev_set_tc_queue(dev, i, count, off);
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
			 attr, i);
		bp->irq_tbl[map_idx].handler = bnxt_msix;
	}
}

static void bnxt_setup_inta(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);

	if (netdev_get_num_tc(bp->dev))
		netdev_reset_tc(bp->dev);

	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
	bp->irq_tbl[0].handler = bnxt_inta;
}
static int bnxt_init_int_mode(struct bnxt *bp);

static int bnxt_setup_int_mode(struct bnxt *bp)
{
	int rc;

	if (!bp->irq_tbl) {
		rc = bnxt_init_int_mode(bp);
		if (rc || !bp->irq_tbl)
			return rc ?: -ENODEV;
	}

	if (bp->flags & BNXT_FLAG_USING_MSIX)
		bnxt_setup_msix(bp);
	else
		bnxt_setup_inta(bp);

	rc = bnxt_set_real_num_queues(bp);
	return rc;
}
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
	return bp->hw_resc.max_vnics;
}
#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_stat_ctxs;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
	return bp->hw_resc.max_cp_rings;
}

static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
	unsigned int cp = bp->hw_resc.max_cp_rings;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		cp -= bnxt_get_ulp_msix_num(bp);

	return cp;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);

	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}

static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
	bp->hw_resc.max_irqs = max_irqs;
}

unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
{
	unsigned int cp;

	cp = bnxt_get_max_func_cp_rings_for_en(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
	else
		return cp - bp->cp_nr_rings;
}

unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
	int max_cp = bnxt_get_max_func_cp_rings(bp);
	int max_irq = bnxt_get_max_func_irqs(bp);
	int total_req = bp->cp_nr_rings + num;
	int max_idx, avail_msix;

	max_idx = bp->total_irqs;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		max_idx = min_t(int, bp->total_irqs, max_cp);
	avail_msix = max_idx - bp->cp_nr_rings;
	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
		return avail_msix;

	if (max_irq < total_req) {
		num = max_irq - bp->cp_nr_rings;
		if (num <= 0)
			return 0;
	}
	return num;
}

static int bnxt_get_num_msix(struct bnxt *bp)
{
	if (!BNXT_NEW_RM(bp))
		return bnxt_get_max_func_irqs(bp);

	return bnxt_nq_rings_in_use(bp);
}
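
/* Enable MSI-X: size the vector table, leave room for vectors reserved
 * by the ULP (RDMA) driver, and trim the ring counts to match the
 * vectors actually granted by the PCI core.
 */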
static int bnxt_init_msix(struct bnxt *bp)
{
	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
	struct msix_entry *msix_ent;

	total_vecs = bnxt_get_num_msix(bp);
	max = bnxt_get_max_func_irqs(bp);
	if (total_vecs > max)
		total_vecs = max;

	if (!total_vecs)
		return 0;

	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
	if (!msix_ent)
		return -ENOMEM;

	for (i = 0; i < total_vecs; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		min = 2;

	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (total_vecs < 0 || total_vecs < ulp_msix) {
		rc = -ENODEV;
		goto msix_setup_exit;
	}

	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (bp->irq_tbl) {
		for (i = 0; i < total_vecs; i++)
			bp->irq_tbl[i].vector = msix_ent[i].vector;

		bp->total_irqs = total_vecs;
		/* Trim rings based upon num of vectors allocated */
		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
				     total_vecs - ulp_msix, min == 1);
		if (rc)
			goto msix_setup_exit;

		bp->cp_nr_rings = (min == 1) ?
				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
				  bp->tx_nr_rings + bp->rx_nr_rings;

	} else {
		rc = -ENOMEM;
		goto msix_setup_exit;
	}
	bp->flags |= BNXT_FLAG_USING_MSIX;
	kfree(msix_ent);
	return 0;

msix_setup_exit:
	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	pci_disable_msix(bp->pdev);
	kfree(msix_ent);
	return rc;
}
static int bnxt_init_inta(struct bnxt *bp)
{
	bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
	if (!bp->irq_tbl)
		return -ENOMEM;

	bp->total_irqs = 1;
	bp->rx_nr_rings = 1;
	bp->tx_nr_rings = 1;
	bp->cp_nr_rings = 1;
	bp->flags |= BNXT_FLAG_SHARED_RINGS;
	bp->irq_tbl[0].vector = bp->pdev->irq;
	return 0;
}

static int bnxt_init_int_mode(struct bnxt *bp)
{
	int rc = 0;

	if (bp->flags & BNXT_FLAG_MSIX_CAP)
		rc = bnxt_init_msix(bp);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
		/* fallback to INTA */
		rc = bnxt_init_inta(bp);
	}
	return rc;
}

static void bnxt_clear_int_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
	int tcs = netdev_get_num_tc(bp->dev);
	bool irq_cleared = false;
	int rc;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (irq_re_init && BNXT_NEW_RM(bp) &&
	    bnxt_get_num_msix(bp) != bp->total_irqs) {
		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		irq_cleared = true;
	}
	rc = __bnxt_reserve_rings(bp);
	if (irq_cleared) {
		if (!rc)
			rc = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, rc);
	}
	if (rc) {
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
		return rc;
	}
	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
		netdev_err(bp->dev, "tx ring reservation failure\n");
		netdev_reset_tc(bp->dev);
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
		return -ENOMEM;
	}
	return 0;
}
8866 static void bnxt_free_irq(struct bnxt *bp)
8868 struct bnxt_irq *irq;
8871 #ifdef CONFIG_RFS_ACCEL
8872 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8873 bp->dev->rx_cpu_rmap = NULL;
8875 if (!bp->irq_tbl || !bp->bnapi)
8878 for (i = 0; i < bp->cp_nr_rings; i++) {
8879 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8881 irq = &bp->irq_tbl[map_idx];
8882 if (irq->requested) {
8883 if (irq->have_cpumask) {
8884 irq_set_affinity_hint(irq->vector, NULL);
8885 free_cpumask_var(irq->cpu_mask);
8886 irq->have_cpumask = 0;
8888 free_irq(irq->vector, bp->bnapi[i]);
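/* Request one IRQ per completion ring.  Vectors for RX rings are also
 * registered in the aRFS CPU rmap, and an affinity hint spreads the
 * vectors over CPUs local to the device's NUMA node.
 */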
8895 static int bnxt_request_irq(struct bnxt *bp)
8898 unsigned long flags = 0;
8899 #ifdef CONFIG_RFS_ACCEL
8900 struct cpu_rmap *rmap;
8903 rc = bnxt_setup_int_mode(bp);
8905 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8909 #ifdef CONFIG_RFS_ACCEL
8910 rmap = bp->dev->rx_cpu_rmap;
8912 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8913 flags = IRQF_SHARED;
8915 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8916 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8917 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8919 #ifdef CONFIG_RFS_ACCEL
8920 if (rmap && bp->bnapi[i]->rx_ring) {
8921 rc = irq_cpu_rmap_add(rmap, irq->vector);
8923 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8928 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8935 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8936 int numa_node = dev_to_node(&bp->pdev->dev);
8938 irq->have_cpumask = 1;
8939 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8941 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8943 netdev_warn(bp->dev,
8944 "Set affinity failed, IRQ = %d\n",
8953 static void bnxt_del_napi(struct bnxt *bp)
8960 for (i = 0; i < bp->cp_nr_rings; i++) {
8961 struct bnxt_napi *bnapi = bp->bnapi[i];
8963 __netif_napi_del(&bnapi->napi);
8965 /* We called __netif_napi_del(); we must respect
8966 * an RCU grace period before freeing napi structures.
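/* Register NAPI contexts, picking the poll handler by chip type
 * (bnxt_poll_p5 on P5 chips, a Nitro A0 variant where applicable,
 * bnxt_poll otherwise).  Without MSI-X a single NAPI context is used.
 */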
8971 static void bnxt_init_napi(struct bnxt *bp)
8974 unsigned int cp_nr_rings = bp->cp_nr_rings;
8975 struct bnxt_napi *bnapi;
8977 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8978 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8980 if (bp->flags & BNXT_FLAG_CHIP_P5)
8981 poll_fn = bnxt_poll_p5;
8982 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8984 for (i = 0; i < cp_nr_rings; i++) {
8985 bnapi = bp->bnapi[i];
8986 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8988 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8989 bnapi = bp->bnapi[cp_nr_rings];
8990 netif_napi_add(bp->dev, &bnapi->napi,
8991 bnxt_poll_nitroa0, 64);
8994 bnapi = bp->bnapi[0];
8995 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8999 static void bnxt_disable_napi(struct bnxt *bp)
9004 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9007 for (i = 0; i < bp->cp_nr_rings; i++) {
9008 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9010 if (bp->bnapi[i]->rx_ring)
9011 cancel_work_sync(&cpr->dim.work);
9013 napi_disable(&bp->bnapi[i]->napi);
9017 static void bnxt_enable_napi(struct bnxt *bp)
9021 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9022 for (i = 0; i < bp->cp_nr_rings; i++) {
9023 struct bnxt_napi *bnapi = bp->bnapi[i];
9024 struct bnxt_cp_ring_info *cpr;
9026 cpr = &bnapi->cp_ring;
9027 if (bnapi->in_reset)
9028 cpr->sw_stats.rx.rx_resets++;
9029 bnapi->in_reset = false;
9031 if (bnapi->rx_ring) {
9032 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9033 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9035 napi_enable(&bnapi->napi);
9039 void bnxt_tx_disable(struct bnxt *bp)
9042 struct bnxt_tx_ring_info *txr;
9045 for (i = 0; i < bp->tx_nr_rings; i++) {
9046 txr = &bp->tx_ring[i];
9047 txr->dev_state = BNXT_DEV_STATE_CLOSING;
9050 /* Drop carrier first to prevent TX timeout */
9051 netif_carrier_off(bp->dev);
9052 /* Stop all TX queues */
9053 netif_tx_disable(bp->dev);
9056 void bnxt_tx_enable(struct bnxt *bp)
9059 struct bnxt_tx_ring_info *txr;
9061 for (i = 0; i < bp->tx_nr_rings; i++) {
9062 txr = &bp->tx_ring[i];
9065 netif_tx_wake_all_queues(bp->dev);
9066 if (bp->link_info.link_up)
9067 netif_carrier_on(bp->dev);
9070 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9072 u8 active_fec = link_info->active_fec_sig_mode &
9073 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9075 switch (active_fec) {
9077 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9079 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9080 return "Clause 74 BaseR";
9081 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9082 return "Clause 91 RS(528,514)";
9083 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9084 return "Clause 91 RS544_1XN";
9085 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9086 return "Clause 91 RS(544,514)";
9087 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9088 return "Clause 91 RS272_1XN";
9089 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9090 return "Clause 91 RS(272,257)";
9094 static void bnxt_report_link(struct bnxt *bp)
9096 if (bp->link_info.link_up) {
9097 const char *signal = "";
9098 const char *flow_ctrl;
9103 netif_carrier_on(bp->dev);
9104 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9105 if (speed == SPEED_UNKNOWN) {
9106 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9109 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9113 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9114 flow_ctrl = "ON - receive & transmit";
9115 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9116 flow_ctrl = "ON - transmit";
9117 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9118 flow_ctrl = "ON - receive";
9121 if (bp->link_info.phy_qcfg_resp.option_flags &
9122 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9123 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9124 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9126 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9129 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9136 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9137 speed, signal, duplex, flow_ctrl);
9138 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9139 netdev_info(bp->dev, "EEE is %s\n",
9140 bp->eee.eee_active ? "active" :
9142 fec = bp->link_info.fec_cfg;
9143 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9144 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9145 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9146 bnxt_report_fec(&bp->link_info));
9148 netif_carrier_off(bp->dev);
9149 netdev_err(bp->dev, "NIC Link is Down\n");
9153 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9155 if (!resp->supported_speeds_auto_mode &&
9156 !resp->supported_speeds_force_mode &&
9157 !resp->supported_pam4_speeds_auto_mode &&
9158 !resp->supported_pam4_speeds_force_mode)
9163 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9166 struct hwrm_port_phy_qcaps_input req = {0};
9167 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9168 struct bnxt_link_info *link_info = &bp->link_info;
9170 if (bp->hwrm_spec_code < 0x10201)
9173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9175 mutex_lock(&bp->hwrm_cmd_lock);
9176 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9178 goto hwrm_phy_qcaps_exit;
9180 bp->phy_flags = resp->flags;
9181 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9182 struct ethtool_eee *eee = &bp->eee;
9183 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9185 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9186 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9187 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9188 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9189 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9192 if (bp->hwrm_spec_code >= 0x10a01) {
9193 if (bnxt_phy_qcaps_no_speed(resp)) {
9194 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9195 netdev_warn(bp->dev, "Ethernet link disabled\n");
9196 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9197 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9198 netdev_info(bp->dev, "Ethernet link enabled\n");
9199 /* Phy re-enabled, reprobe the speeds */
9200 link_info->support_auto_speeds = 0;
9201 link_info->support_pam4_auto_speeds = 0;
9204 if (resp->supported_speeds_auto_mode)
9205 link_info->support_auto_speeds =
9206 le16_to_cpu(resp->supported_speeds_auto_mode);
9207 if (resp->supported_pam4_speeds_auto_mode)
9208 link_info->support_pam4_auto_speeds =
9209 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9211 bp->port_count = resp->port_cnt;
9213 hwrm_phy_qcaps_exit:
9214 mutex_unlock(&bp->hwrm_cmd_lock);
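/* Return true if @advertising contains any speed bit absent from
 * @supported; the XOR/OR dance below is equivalent to
 * (advertising & ~supported) != 0.
 */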
9218 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9220 u16 diff = advertising ^ supported;
9222 return ((supported | diff) != supported);
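/* Query PORT_PHY_QCFG and refresh the cached link state (speed, duplex,
 * pause, FEC, EEE and module status).  When @chng_link_state is set,
 * carrier transitions are reported; advertised speeds that firmware no
 * longer supports are pruned and, if autoneg is on, re-applied.
 */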
9225 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9228 struct bnxt_link_info *link_info = &bp->link_info;
9229 struct hwrm_port_phy_qcfg_input req = {0};
9230 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9231 u8 link_up = link_info->link_up;
9232 bool support_changed = false;
9234 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9236 mutex_lock(&bp->hwrm_cmd_lock);
9237 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9239 mutex_unlock(&bp->hwrm_cmd_lock);
9243 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9244 link_info->phy_link_status = resp->link;
9245 link_info->duplex = resp->duplex_cfg;
9246 if (bp->hwrm_spec_code >= 0x10800)
9247 link_info->duplex = resp->duplex_state;
9248 link_info->pause = resp->pause;
9249 link_info->auto_mode = resp->auto_mode;
9250 link_info->auto_pause_setting = resp->auto_pause;
9251 link_info->lp_pause = resp->link_partner_adv_pause;
9252 link_info->force_pause_setting = resp->force_pause;
9253 link_info->duplex_setting = resp->duplex_cfg;
9254 if (link_info->phy_link_status == BNXT_LINK_LINK)
9255 link_info->link_speed = le16_to_cpu(resp->link_speed);
9257 link_info->link_speed = 0;
9258 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9259 link_info->force_pam4_link_speed =
9260 le16_to_cpu(resp->force_pam4_link_speed);
9261 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9262 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9263 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9264 link_info->auto_pam4_link_speeds =
9265 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9266 link_info->lp_auto_link_speeds =
9267 le16_to_cpu(resp->link_partner_adv_speeds);
9268 link_info->lp_auto_pam4_link_speeds =
9269 resp->link_partner_pam4_adv_speeds;
9270 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9271 link_info->phy_ver[0] = resp->phy_maj;
9272 link_info->phy_ver[1] = resp->phy_min;
9273 link_info->phy_ver[2] = resp->phy_bld;
9274 link_info->media_type = resp->media_type;
9275 link_info->phy_type = resp->phy_type;
9276 link_info->transceiver = resp->xcvr_pkg_type;
9277 link_info->phy_addr = resp->eee_config_phy_addr &
9278 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9279 link_info->module_status = resp->module_status;
9281 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9282 struct ethtool_eee *eee = &bp->eee;
9285 eee->eee_active = 0;
9286 if (resp->eee_config_phy_addr &
9287 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9288 eee->eee_active = 1;
9289 fw_speeds = le16_to_cpu(
9290 resp->link_partner_adv_eee_link_speed_mask);
9291 eee->lp_advertised =
9292 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9295 /* Pull initial EEE config */
9296 if (!chng_link_state) {
9297 if (resp->eee_config_phy_addr &
9298 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9299 eee->eee_enabled = 1;
9301 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9303 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9305 if (resp->eee_config_phy_addr &
9306 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9309 eee->tx_lpi_enabled = 1;
9310 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9311 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9312 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9317 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9318 if (bp->hwrm_spec_code >= 0x10504) {
9319 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9320 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9322 /* TODO: need to add more logic to report VF link */
9323 if (chng_link_state) {
9324 if (link_info->phy_link_status == BNXT_LINK_LINK)
9325 link_info->link_up = 1;
9327 link_info->link_up = 0;
9328 if (link_up != link_info->link_up)
9329 bnxt_report_link(bp);
9331 /* always link down if not required to update link state */
9332 link_info->link_up = 0;
9334 mutex_unlock(&bp->hwrm_cmd_lock);
9336 if (!BNXT_PHY_CFG_ABLE(bp))
9339 /* Check if any advertised speeds are no longer supported. The caller
9340 * holds the link_lock mutex, so we can modify link_info settings.
9342 if (bnxt_support_dropped(link_info->advertising,
9343 link_info->support_auto_speeds)) {
9344 link_info->advertising = link_info->support_auto_speeds;
9345 support_changed = true;
9347 if (bnxt_support_dropped(link_info->advertising_pam4,
9348 link_info->support_pam4_auto_speeds)) {
9349 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9350 support_changed = true;
9352 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9353 bnxt_hwrm_set_link_setting(bp, true, false);
9357 static void bnxt_get_port_module_status(struct bnxt *bp)
9359 struct bnxt_link_info *link_info = &bp->link_info;
9360 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9363 if (bnxt_update_link(bp, true))
9366 module_status = link_info->module_status;
9367 switch (module_status) {
9368 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9369 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9370 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9371 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9373 if (bp->hwrm_spec_code >= 0x10201) {
9374 netdev_warn(bp->dev, "Module part number %s\n",
9375 resp->phy_vendor_partnumber);
9377 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9378 netdev_warn(bp->dev, "TX is disabled\n");
9379 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9380 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9385 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9387 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9388 if (bp->hwrm_spec_code >= 0x10201)
9390 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9391 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9392 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9393 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9394 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9396 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9398 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9399 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9400 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9401 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9403 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9404 if (bp->hwrm_spec_code >= 0x10201) {
9405 req->auto_pause = req->force_pause;
9406 req->enables |= cpu_to_le32(
9407 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9412 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9414 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9415 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9416 if (bp->link_info.advertising) {
9417 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9418 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9420 if (bp->link_info.advertising_pam4) {
9422 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9423 req->auto_link_pam4_speed_mask =
9424 cpu_to_le16(bp->link_info.advertising_pam4);
9426 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9427 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9429 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9430 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9431 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9432 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9434 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9438 /* tell chimp that the setting takes effect immediately */
9439 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9442 int bnxt_hwrm_set_pause(struct bnxt *bp)
9444 struct hwrm_port_phy_cfg_input req = {0};
9447 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9448 bnxt_hwrm_set_pause_common(bp, &req);
9450 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9451 bp->link_info.force_link_chng)
9452 bnxt_hwrm_set_link_common(bp, &req);
9454 mutex_lock(&bp->hwrm_cmd_lock);
9455 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9456 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9457 /* since changing the pause setting doesn't trigger any link
9458 * change event, the driver needs to update the current pause
9459 * result upon successful return of the phy_cfg command
9461 bp->link_info.pause =
9462 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9463 bp->link_info.auto_pause_setting = 0;
9464 if (!bp->link_info.force_link_chng)
9465 bnxt_report_link(bp);
9467 bp->link_info.force_link_chng = false;
9468 mutex_unlock(&bp->hwrm_cmd_lock);
9472 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9473 struct hwrm_port_phy_cfg_input *req)
9475 struct ethtool_eee *eee = &bp->eee;
9477 if (eee->eee_enabled) {
9479 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9481 if (eee->tx_lpi_enabled)
9482 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9484 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9486 req->flags |= cpu_to_le32(flags);
9487 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9488 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9489 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9491 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9495 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9497 struct hwrm_port_phy_cfg_input req = {0};
9499 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9501 bnxt_hwrm_set_pause_common(bp, &req);
9503 bnxt_hwrm_set_link_common(bp, &req);
9506 bnxt_hwrm_set_eee(bp, &req);
9507 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9510 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9512 struct hwrm_port_phy_cfg_input req = {0};
9514 if (!BNXT_SINGLE_PF(bp))
9517 if (pci_num_vf(bp->pdev) &&
9518 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9521 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9522 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9523 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9526 static int bnxt_fw_init_one(struct bnxt *bp);
9528 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9530 #ifdef CONFIG_TEE_BNXT_FW
9531 int rc = tee_bnxt_fw_load();
9534 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9538 netdev_err(bp->dev, "OP-TEE not supported\n");
9543 static int bnxt_try_recover_fw(struct bnxt *bp)
9545 if (bp->fw_health && bp->fw_health->status_reliable) {
9549 mutex_lock(&bp->hwrm_cmd_lock);
9551 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9552 rc = __bnxt_hwrm_ver_get(bp, true);
9553 if (!BNXT_FW_IS_BOOTING(sts) &&
9554 !BNXT_FW_IS_RECOVERING(sts))
9557 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9558 mutex_unlock(&bp->hwrm_cmd_lock);
9560 if (!BNXT_FW_IS_HEALTHY(sts)) {
9562 "Firmware not responding, status: 0x%x\n",
9566 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9567 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9568 return bnxt_fw_reset_via_optee(bp);
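/* Tell firmware the interface is going up or down.  The response flags
 * say whether resources changed or a hot firmware reset completed while
 * the interface was down; in that case context memory is freed, the
 * init sequence is re-run and the interrupt mode re-initialized, and
 * under the new resource manager all cached reservations are cleared
 * and re-queried.
 */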
9576 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9578 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9579 struct hwrm_func_drv_if_change_input req = {0};
9580 bool fw_reset = !bp->irq_tbl;
9581 bool resc_reinit = false;
9585 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9588 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9590 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9591 mutex_lock(&bp->hwrm_cmd_lock);
9592 while (retry < BNXT_FW_IF_RETRY) {
9593 rc = _hwrm_send_message(bp, &req, sizeof(req),
9602 flags = le32_to_cpu(resp->flags);
9603 mutex_unlock(&bp->hwrm_cmd_lock);
9608 rc = bnxt_try_recover_fw(bp);
9615 bnxt_inv_fw_health_reg(bp);
9619 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9621 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9623 else if (bp->fw_health && !bp->fw_health->status_reliable)
9624 bnxt_try_map_fw_health_reg(bp);
9626 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9627 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9628 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9631 if (resc_reinit || fw_reset) {
9633 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9634 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9636 bnxt_free_ctx_mem(bp);
9640 rc = bnxt_fw_init_one(bp);
9642 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9643 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9646 bnxt_clear_int_mode(bp);
9647 rc = bnxt_init_int_mode(bp);
9649 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9650 netdev_err(bp->dev, "init int mode failed\n");
9654 if (BNXT_NEW_RM(bp)) {
9655 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9657 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9659 netdev_err(bp->dev, "resc_qcaps failed\n");
9661 hw_resc->resv_cp_rings = 0;
9662 hw_resc->resv_stat_ctxs = 0;
9663 hw_resc->resv_irqs = 0;
9664 hw_resc->resv_tx_rings = 0;
9665 hw_resc->resv_rx_rings = 0;
9666 hw_resc->resv_hw_ring_grps = 0;
9667 hw_resc->resv_vnics = 0;
9669 bp->tx_nr_rings = 0;
9670 bp->rx_nr_rings = 0;
9677 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9679 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9680 struct hwrm_port_led_qcaps_input req = {0};
9681 struct bnxt_pf_info *pf = &bp->pf;
9685 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9688 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9689 req.port_id = cpu_to_le16(pf->port_id);
9690 mutex_lock(&bp->hwrm_cmd_lock);
9691 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9693 mutex_unlock(&bp->hwrm_cmd_lock);
9696 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9699 bp->num_leds = resp->num_leds;
9700 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9702 for (i = 0; i < bp->num_leds; i++) {
9703 struct bnxt_led_info *led = &bp->leds[i];
9704 __le16 caps = led->led_state_caps;
9706 if (!led->led_group_id ||
9707 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9713 mutex_unlock(&bp->hwrm_cmd_lock);
9717 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9719 struct hwrm_wol_filter_alloc_input req = {0};
9720 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9723 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9724 req.port_id = cpu_to_le16(bp->pf.port_id);
9725 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9726 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9727 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9728 mutex_lock(&bp->hwrm_cmd_lock);
9729 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9731 bp->wol_filter_id = resp->wol_filter_id;
9732 mutex_unlock(&bp->hwrm_cmd_lock);
9736 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9738 struct hwrm_wol_filter_free_input req = {0};
9740 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9741 req.port_id = cpu_to_le16(bp->pf.port_id);
9742 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9743 req.wol_filter_id = bp->wol_filter_id;
9744 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9747 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9749 struct hwrm_wol_filter_qcfg_input req = {0};
9750 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9751 u16 next_handle = 0;
9754 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9755 req.port_id = cpu_to_le16(bp->pf.port_id);
9756 req.handle = cpu_to_le16(handle);
9757 mutex_lock(&bp->hwrm_cmd_lock);
9758 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9760 next_handle = le16_to_cpu(resp->next_handle);
9761 if (next_handle != 0) {
9762 if (resp->wol_type ==
9763 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9765 bp->wol_filter_id = resp->wol_filter_id;
9769 mutex_unlock(&bp->hwrm_cmd_lock);
9773 static void bnxt_get_wol_settings(struct bnxt *bp)
9778 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9782 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9783 } while (handle && handle != 0xffff);
9786 #ifdef CONFIG_BNXT_HWMON
9787 static ssize_t bnxt_show_temp(struct device *dev,
9788 struct device_attribute *devattr, char *buf)
9790 struct hwrm_temp_monitor_query_input req = {0};
9791 struct hwrm_temp_monitor_query_output *resp;
9792 struct bnxt *bp = dev_get_drvdata(dev);
9796 resp = bp->hwrm_cmd_resp_addr;
9797 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9798 mutex_lock(&bp->hwrm_cmd_lock);
9799 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9801 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9802 mutex_unlock(&bp->hwrm_cmd_lock);
9807 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9809 static struct attribute *bnxt_attrs[] = {
9810 &sensor_dev_attr_temp1_input.dev_attr.attr,
9813 ATTRIBUTE_GROUPS(bnxt);
9815 static void bnxt_hwmon_close(struct bnxt *bp)
9817 if (bp->hwmon_dev) {
9818 hwmon_device_unregister(bp->hwmon_dev);
9819 bp->hwmon_dev = NULL;
9823 static void bnxt_hwmon_open(struct bnxt *bp)
9825 struct hwrm_temp_monitor_query_input req = {0};
9826 struct pci_dev *pdev = bp->pdev;
9829 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9830 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9831 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9832 bnxt_hwmon_close(bp);
9839 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9840 DRV_MODULE_NAME, bp,
9842 if (IS_ERR(bp->hwmon_dev)) {
9843 bp->hwmon_dev = NULL;
9844 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9848 static void bnxt_hwmon_close(struct bnxt *bp)
9852 static void bnxt_hwmon_open(struct bnxt *bp)
9857 static bool bnxt_eee_config_ok(struct bnxt *bp)
9859 struct ethtool_eee *eee = &bp->eee;
9860 struct bnxt_link_info *link_info = &bp->link_info;
9862 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
9865 if (eee->eee_enabled) {
9867 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9869 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9870 eee->eee_enabled = 0;
9873 if (eee->advertised & ~advertising) {
9874 eee->advertised = advertising & eee->supported;
9881 static int bnxt_update_phy_setting(struct bnxt *bp)
9884 bool update_link = false;
9885 bool update_pause = false;
9886 bool update_eee = false;
9887 struct bnxt_link_info *link_info = &bp->link_info;
9889 rc = bnxt_update_link(bp, true);
9891 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9895 if (!BNXT_SINGLE_PF(bp))
9898 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9899 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9900 link_info->req_flow_ctrl)
9901 update_pause = true;
9902 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9903 link_info->force_pause_setting != link_info->req_flow_ctrl)
9904 update_pause = true;
9905 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9906 if (BNXT_AUTO_MODE(link_info->auto_mode))
9908 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
9909 link_info->req_link_speed != link_info->force_link_speed)
9911 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
9912 link_info->req_link_speed != link_info->force_pam4_link_speed)
9914 if (link_info->req_duplex != link_info->duplex_setting)
9917 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9919 if (link_info->advertising != link_info->auto_link_speeds ||
9920 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
9924 /* The last close may have shut down the link, so we need to call
9925 * PHY_CFG to bring it back up.
9927 if (!bp->link_info.link_up)
9930 if (!bnxt_eee_config_ok(bp))
9934 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9935 else if (update_pause)
9936 rc = bnxt_hwrm_set_pause(bp);
9938 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9946 /* Common routine to pre-map certain register blocks to different GRC windows.
9947 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9948 * in the PF and 3 windows in the VF can be customized to map in different
9951 static void bnxt_preset_reg_win(struct bnxt *bp)
9954 /* CAG registers map to GRC window #4 */
9955 writel(BNXT_CAG_REG_BASE,
9956 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9960 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9962 static int bnxt_reinit_after_abort(struct bnxt *bp)
9966 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9969 if (bp->dev->reg_state == NETREG_UNREGISTERED)
9972 rc = bnxt_fw_init_one(bp);
9974 bnxt_clear_int_mode(bp);
9975 rc = bnxt_init_int_mode(bp);
9977 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9978 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
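/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC, optionally re-apply PHY settings, then enable
 * interrupts and TX and arm the periodic timer.
 */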
9984 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9988 bnxt_preset_reg_win(bp);
9989 netif_carrier_off(bp->dev);
9991 /* Reserve rings now if none were reserved at driver probe. */
9992 rc = bnxt_init_dflt_ring_mode(bp);
9994 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9998 rc = bnxt_reserve_rings(bp, irq_re_init);
10001 if ((bp->flags & BNXT_FLAG_RFS) &&
10002 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10003 /* disable RFS if falling back to INTA */
10004 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10005 bp->flags &= ~BNXT_FLAG_RFS;
10008 rc = bnxt_alloc_mem(bp, irq_re_init);
10010 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10011 goto open_err_free_mem;
10015 bnxt_init_napi(bp);
10016 rc = bnxt_request_irq(bp);
10018 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10023 rc = bnxt_init_nic(bp, irq_re_init);
10025 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10029 bnxt_enable_napi(bp);
10030 bnxt_debug_dev_init(bp);
10032 if (link_re_init) {
10033 mutex_lock(&bp->link_lock);
10034 rc = bnxt_update_phy_setting(bp);
10035 mutex_unlock(&bp->link_lock);
10037 netdev_warn(bp->dev, "failed to update phy settings\n");
10038 if (BNXT_SINGLE_PF(bp)) {
10039 bp->link_info.phy_retry = true;
10040 bp->link_info.phy_retry_expires =
10047 udp_tunnel_nic_reset_ntf(bp->dev);
10049 set_bit(BNXT_STATE_OPEN, &bp->state);
10050 bnxt_enable_int(bp);
10051 /* Enable TX queues */
10052 bnxt_tx_enable(bp);
10053 mod_timer(&bp->timer, jiffies + bp->current_interval);
10054 /* Poll link status and check for SFP+ module status */
10055 bnxt_get_port_module_status(bp);
10057 /* VF-reps may need to be re-opened after the PF is re-opened */
10059 bnxt_vf_reps_open(bp);
10066 bnxt_free_skbs(bp);
10068 bnxt_free_mem(bp, true);
10072 /* rtnl_lock held */
10073 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10077 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10080 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10082 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10083 dev_close(bp->dev);
10088 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10089 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10092 int bnxt_half_open_nic(struct bnxt *bp)
10096 rc = bnxt_alloc_mem(bp, false);
10098 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10099 goto half_open_err;
10101 rc = bnxt_init_nic(bp, false);
10103 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10104 goto half_open_err;
10109 bnxt_free_skbs(bp);
10110 bnxt_free_mem(bp, false);
10111 dev_close(bp->dev);
10115 /* rtnl_lock held, this call can only be made after a previous successful
10116 * call to bnxt_half_open_nic().
10118 void bnxt_half_close_nic(struct bnxt *bp)
10120 bnxt_hwrm_resource_free(bp, false, false);
10121 bnxt_free_skbs(bp);
10122 bnxt_free_mem(bp, false);
10125 static void bnxt_reenable_sriov(struct bnxt *bp)
10128 struct bnxt_pf_info *pf = &bp->pf;
10129 int n = pf->active_vfs;
10132 bnxt_cfg_hw_sriov(bp, &n, true);
10136 static int bnxt_open(struct net_device *dev)
10138 struct bnxt *bp = netdev_priv(dev);
10141 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10142 rc = bnxt_reinit_after_abort(bp);
10145 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10147 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10152 rc = bnxt_hwrm_if_change(bp, true);
10155 rc = __bnxt_open_nic(bp, true, true);
10157 bnxt_hwrm_if_change(bp, false);
10159 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10160 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10161 bnxt_ulp_start(bp, 0);
10162 bnxt_reenable_sriov(bp);
10165 bnxt_hwmon_open(bp);
10171 static bool bnxt_drv_busy(struct bnxt *bp)
10173 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10174 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10177 static void bnxt_get_ring_stats(struct bnxt *bp,
10178 struct rtnl_link_stats64 *stats);
10180 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10183 /* Close the VF-reps before closing PF */
10185 bnxt_vf_reps_close(bp);
10187 /* Change device state to avoid TX queue wake-ups */
10188 bnxt_tx_disable(bp);
10190 clear_bit(BNXT_STATE_OPEN, &bp->state);
10191 smp_mb__after_atomic();
10192 while (bnxt_drv_busy(bp))
10195 /* Flush rings and disable interrupts */
10196 bnxt_shutdown_nic(bp, irq_re_init);
10198 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10200 bnxt_debug_dev_exit(bp);
10201 bnxt_disable_napi(bp);
10202 del_timer_sync(&bp->timer);
10203 bnxt_free_skbs(bp);
10205 /* Save ring stats before shutdown */
10206 if (bp->bnapi && irq_re_init)
10207 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10212 bnxt_free_mem(bp, irq_re_init);
10215 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10219 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10220 /* If we get here, it means firmware reset is in progress
10221 * while we are trying to close. We can safely proceed with
10222 * the close because we are holding rtnl_lock(). Some firmware
10223 * messages may fail as we proceed to close. We set the
10224 * ABORT_ERR flag here so that the FW reset thread will later
10225 * abort when it gets the rtnl_lock() and sees the flag.
10227 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10228 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10231 #ifdef CONFIG_BNXT_SRIOV
10232 if (bp->sriov_cfg) {
10233 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10235 BNXT_SRIOV_CFG_WAIT_TMO);
10237 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10240 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10244 static int bnxt_close(struct net_device *dev)
10246 struct bnxt *bp = netdev_priv(dev);
10248 bnxt_hwmon_close(bp);
10249 bnxt_close_nic(bp, true, true);
10250 bnxt_hwrm_shutdown_link(bp);
10251 bnxt_hwrm_if_change(bp, false);
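/* Read a PHY register over MDIO via HWRM; clause 45 addresses carry the
 * device address and a full 16-bit register number.
 */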
10255 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10258 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10259 struct hwrm_port_phy_mdio_read_input req = {0};
10262 if (bp->hwrm_spec_code < 0x10a00)
10263 return -EOPNOTSUPP;
10265 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10266 req.port_id = cpu_to_le16(bp->pf.port_id);
10267 req.phy_addr = phy_addr;
10268 req.reg_addr = cpu_to_le16(reg & 0x1f);
10269 if (mdio_phy_id_is_c45(phy_addr)) {
10271 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10272 req.dev_addr = mdio_phy_id_devad(phy_addr);
10273 req.reg_addr = cpu_to_le16(reg);
10276 mutex_lock(&bp->hwrm_cmd_lock);
10277 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10279 *val = le16_to_cpu(resp->reg_data);
10280 mutex_unlock(&bp->hwrm_cmd_lock);
10284 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10287 struct hwrm_port_phy_mdio_write_input req = {0};
10289 if (bp->hwrm_spec_code < 0x10a00)
10290 return -EOPNOTSUPP;
10292 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10293 req.port_id = cpu_to_le16(bp->pf.port_id);
10294 req.phy_addr = phy_addr;
10295 req.reg_addr = cpu_to_le16(reg & 0x1f);
10296 if (mdio_phy_id_is_c45(phy_addr)) {
10298 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10299 req.dev_addr = mdio_phy_id_devad(phy_addr);
10300 req.reg_addr = cpu_to_le16(reg);
10302 req.reg_data = cpu_to_le16(val);
10304 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10307 /* rtnl_lock held */
10308 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10310 struct mii_ioctl_data *mdio = if_mii(ifr);
10311 struct bnxt *bp = netdev_priv(dev);
10316 mdio->phy_id = bp->link_info.phy_addr;
10319 case SIOCGMIIREG: {
10320 u16 mii_regval = 0;
10322 if (!netif_running(dev))
10325 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10327 mdio->val_out = mii_regval;
10332 if (!netif_running(dev))
10335 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10342 return -EOPNOTSUPP;
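/* Sum the per-completion-ring software counters into rtnl_link_stats64;
 * RX discards are accounted as rx_missed_errors and TX errors as
 * tx_dropped.
 */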
10345 static void bnxt_get_ring_stats(struct bnxt *bp,
10346 struct rtnl_link_stats64 *stats)
10350 for (i = 0; i < bp->cp_nr_rings; i++) {
10351 struct bnxt_napi *bnapi = bp->bnapi[i];
10352 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10353 u64 *sw = cpr->stats.sw_stats;
10355 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10356 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10357 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10359 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10360 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10361 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10363 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10364 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10365 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10367 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10368 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10369 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10371 stats->rx_missed_errors +=
10372 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10374 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10376 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10380 static void bnxt_add_prev_stats(struct bnxt *bp,
10381 struct rtnl_link_stats64 *stats)
10383 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10385 stats->rx_packets += prev_stats->rx_packets;
10386 stats->tx_packets += prev_stats->tx_packets;
10387 stats->rx_bytes += prev_stats->rx_bytes;
10388 stats->tx_bytes += prev_stats->tx_bytes;
10389 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10390 stats->multicast += prev_stats->multicast;
10391 stats->tx_dropped += prev_stats->tx_dropped;
10395 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10397 struct bnxt *bp = netdev_priv(dev);
10399 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10400 /* Make sure bnxt_close_nic() sees that we are reading stats before
10401 * we check the BNXT_STATE_OPEN flag.
10403 smp_mb__after_atomic();
10404 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10405 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10406 *stats = bp->net_stats_prev;
10410 bnxt_get_ring_stats(bp, stats);
10411 bnxt_add_prev_stats(bp, stats);
10413 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10414 u64 *rx = bp->port_stats.sw_stats;
10415 u64 *tx = bp->port_stats.sw_stats +
10416 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10418 stats->rx_crc_errors =
10419 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10420 stats->rx_frame_errors =
10421 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10422 stats->rx_length_errors =
10423 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10424 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10425 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10427 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10428 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10429 stats->collisions =
10430 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10431 stats->tx_fifo_errors =
10432 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10433 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10435 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
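/* Copy the device multicast list into VNIC 0's mc_list, switching to
 * ALL_MCAST when it exceeds BNXT_MAX_MC_ADDRS.  Returns true if the
 * list or its length changed.
 */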
10438 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10440 struct net_device *dev = bp->dev;
10441 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10442 struct netdev_hw_addr *ha;
10445 bool update = false;
10448 netdev_for_each_mc_addr(ha, dev) {
10449 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10450 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10451 vnic->mc_list_count = 0;
10455 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10456 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10463 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10465 if (mc_count != vnic->mc_list_count) {
10466 vnic->mc_list_count = mc_count;
10472 static bool bnxt_uc_list_updated(struct bnxt *bp)
10474 struct net_device *dev = bp->dev;
10475 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10476 struct netdev_hw_addr *ha;
10479 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10482 netdev_for_each_uc_addr(ha, dev) {
10483 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10491 static void bnxt_set_rx_mode(struct net_device *dev)
10493 struct bnxt *bp = netdev_priv(dev);
10494 struct bnxt_vnic_info *vnic;
10495 bool mc_update = false;
10499 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10502 vnic = &bp->vnic_info[0];
10503 mask = vnic->rx_mask;
10504 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10505 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10506 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10507 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10509 if (dev->flags & IFF_PROMISC)
10510 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10512 uc_update = bnxt_uc_list_updated(bp);
10514 if (dev->flags & IFF_BROADCAST)
10515 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10516 if (dev->flags & IFF_ALLMULTI) {
10517 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10518 vnic->mc_list_count = 0;
10520 mc_update = bnxt_mc_list_updated(bp, &mask);
10523 if (mask != vnic->rx_mask || uc_update || mc_update) {
10524 vnic->rx_mask = mask;
10526 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10527 bnxt_queue_sp_work(bp);
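/* Apply the RX mode computed in bnxt_set_rx_mode() from process
 * context: free stale unicast L2 filters, reprogram the unicast list
 * (going promiscuous when it exceeds BNXT_MAX_UC_ADDRS - 1), then push
 * the VNIC RX mask, degrading to ALL_MCAST if the multicast update
 * fails.
 */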
10531 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10533 struct net_device *dev = bp->dev;
10534 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10535 struct netdev_hw_addr *ha;
10536 int i, off = 0, rc;
10539 netif_addr_lock_bh(dev);
10540 uc_update = bnxt_uc_list_updated(bp);
10541 netif_addr_unlock_bh(dev);
10546 mutex_lock(&bp->hwrm_cmd_lock);
10547 for (i = 1; i < vnic->uc_filter_count; i++) {
10548 struct hwrm_cfa_l2_filter_free_input req = {0};
10550 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10553 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10555 rc = _hwrm_send_message(bp, &req, sizeof(req),
10558 mutex_unlock(&bp->hwrm_cmd_lock);
10560 vnic->uc_filter_count = 1;
10562 netif_addr_lock_bh(dev);
10563 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10564 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10566 netdev_for_each_uc_addr(ha, dev) {
10567 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10569 vnic->uc_filter_count++;
10572 netif_addr_unlock_bh(dev);
10574 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10575 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10577 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10579 vnic->uc_filter_count = i;
10585 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10586 !bnxt_promisc_ok(bp))
10587 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10588 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10589 if (rc && vnic->mc_list_count) {
10590 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10592 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10593 vnic->mc_list_count = 0;
10594 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10597 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10603 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10605 #ifdef CONFIG_BNXT_SRIOV
10606 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10607 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10609 /* No minimum rings were provisioned by the PF. Don't
10610 * reserve rings by default when device is down.
10612 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10615 if (!netif_running(bp->dev))
10622 /* If the chip and firmware support RFS */
10623 static bool bnxt_rfs_supported(struct bnxt *bp)
10625 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10626 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10630 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10632 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10637 /* If runtime conditions support RFS */
10638 static bool bnxt_rfs_capable(struct bnxt *bp)
10640 #ifdef CONFIG_RFS_ACCEL
10641 int vnics, max_vnics, max_rss_ctxs;
10643 if (bp->flags & BNXT_FLAG_CHIP_P5)
10644 return bnxt_rfs_supported(bp);
10645 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10648 vnics = 1 + bp->rx_nr_rings;
10649 max_vnics = bnxt_get_max_func_vnics(bp);
10650 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10652 /* RSS contexts not a limiting factor */
10653 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10654 max_rss_ctxs = max_vnics;
10655 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10656 if (bp->rx_nr_rings > 1)
10657 netdev_warn(bp->dev,
10658 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10659 min(max_rss_ctxs - 1, max_vnics - 1));
10663 if (!BNXT_NEW_RM(bp))
10666 if (vnics == bp->hw_resc.resv_vnics)
10669 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10670 if (vnics <= bp->hw_resc.resv_vnics)
10673 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10674 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10681 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10682 netdev_features_t features)
10684 struct bnxt *bp = netdev_priv(dev);
10685 netdev_features_t vlan_features;
10687 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10688 features &= ~NETIF_F_NTUPLE;
10690 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10691 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10693 if (!(features & NETIF_F_GRO))
10694 features &= ~NETIF_F_GRO_HW;
10696 if (features & NETIF_F_GRO_HW)
10697 features &= ~NETIF_F_LRO;
10699 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10700 * turned on or off together.
10702 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10703 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10704 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10705 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10706 else if (vlan_features)
10707 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10709 #ifdef CONFIG_BNXT_SRIOV
10710 if (BNXT_VF(bp) && bp->vf.vlan)
10711 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10716 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10718 struct bnxt *bp = netdev_priv(dev);
10719 u32 flags = bp->flags;
10722 bool re_init = false;
10723 bool update_tpa = false;
10725 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10726 if (features & NETIF_F_GRO_HW)
10727 flags |= BNXT_FLAG_GRO;
10728 else if (features & NETIF_F_LRO)
10729 flags |= BNXT_FLAG_LRO;
10731 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10732 flags &= ~BNXT_FLAG_TPA;
10734 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10735 flags |= BNXT_FLAG_STRIP_VLAN;
10737 if (features & NETIF_F_NTUPLE)
10738 flags |= BNXT_FLAG_RFS;
10740 changes = flags ^ bp->flags;
10741 if (changes & BNXT_FLAG_TPA) {
10743 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10744 (flags & BNXT_FLAG_TPA) == 0 ||
10745 (bp->flags & BNXT_FLAG_CHIP_P5))
10749 if (changes & ~BNXT_FLAG_TPA)
10752 if (flags != bp->flags) {
10753 u32 old_flags = bp->flags;
10755 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10758 bnxt_set_ring_params(bp);
10763 bnxt_close_nic(bp, false, false);
10766 bnxt_set_ring_params(bp);
10768 return bnxt_open_nic(bp, false, false);
10772 rc = bnxt_set_tpa(bp,
10773 (flags & BNXT_FLAG_TPA) ?
10776 bp->flags = old_flags;
10782 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10785 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10790 /* Check that there are at most 2 IPv6 extension headers, no
10791 * fragment header, and each is <= 64 bytes.
10793 start = nw_off + sizeof(*ip6h);
10794 nexthdr = &ip6h->nexthdr;
10795 while (ipv6_ext_hdr(*nexthdr)) {
10796 struct ipv6_opt_hdr *hp;
10799 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10800 *nexthdr == NEXTHDR_FRAGMENT)
10802 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10803 skb_headlen(skb), NULL);
10806 if (*nexthdr == NEXTHDR_AUTH)
10807 hdrlen = ipv6_authlen(hp);
10809 hdrlen = ipv6_optlen(hp);
10813 nexthdr = &hp->nexthdr;
10818 /* Caller will check inner protocol */
10819 if (skb->encapsulation) {
10825 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
10826 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
10829 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
10830 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
10832 struct udphdr *uh = udp_hdr(skb);
10833 __be16 udp_port = uh->dest;
10835 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
10837 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
10838 struct ethhdr *eh = inner_eth_hdr(skb);
10840 switch (eh->h_proto) {
10841 case htons(ETH_P_IP):
10843 case htons(ETH_P_IPV6):
10844 return bnxt_exthdr_check(bp, skb,
10845 skb_inner_network_offset(skb),
10852 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
10854 switch (l4_proto) {
10856 return bnxt_udp_tunl_check(bp, skb);
10859 case IPPROTO_GRE: {
10860 switch (skb->inner_protocol) {
10863 case htons(ETH_P_IP):
10865 case htons(ETH_P_IPV6):
10870 /* Check ext headers of inner ipv6 */
10871 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
10877 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
10878 struct net_device *dev,
10879 netdev_features_t features)
10881 struct bnxt *bp = netdev_priv(dev);
10884 features = vlan_features_check(skb, features);
10885 switch (vlan_get_protocol(skb)) {
10886 case htons(ETH_P_IP):
10887 if (!skb->encapsulation)
10889 l4_proto = &ip_hdr(skb)->protocol;
10890 if (bnxt_tunl_check(bp, skb, *l4_proto))
10893 case htons(ETH_P_IPV6):
10894 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
10897 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
10901 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
10904 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10907 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10908 struct hwrm_dbg_read_direct_input req = {0};
10909 __le32 *dbg_reg_buf;
10910 dma_addr_t mapping;
10913 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10914 &mapping, GFP_KERNEL);
10917 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10918 req.host_dest_addr = cpu_to_le64(mapping);
10919 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10920 req.read_len32 = cpu_to_le32(num_words);
10921 mutex_lock(&bp->hwrm_cmd_lock);
10922 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10923 if (rc || resp->error_code) {
10925 goto dbg_rd_reg_exit;
10927 for (i = 0; i < num_words; i++)
10928 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10931 mutex_unlock(&bp->hwrm_cmd_lock);
10932 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10936 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10937 u32 ring_id, u32 *prod, u32 *cons)
10939 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10940 struct hwrm_dbg_ring_info_get_input req = {0};
10943 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10944 req.ring_type = ring_type;
10945 req.fw_ring_id = cpu_to_le32(ring_id);
10946 mutex_lock(&bp->hwrm_cmd_lock);
10947 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10949 *prod = le32_to_cpu(resp->producer_index);
10950 *cons = le32_to_cpu(resp->consumer_index);
10952 mutex_unlock(&bp->hwrm_cmd_lock);
10956 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10958 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10959 int i = bnapi->index;
10964 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10965 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10969 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10971 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10972 int i = bnapi->index;
10977 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10978 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10979 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10980 rxr->rx_sw_agg_prod);
10983 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10985 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10986 int i = bnapi->index;
10988 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10989 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10992 static void bnxt_dbg_dump_states(struct bnxt *bp)
10995 struct bnxt_napi *bnapi;
10997 for (i = 0; i < bp->cp_nr_rings; i++) {
10998 bnapi = bp->bnapi[i];
10999 if (netif_msg_drv(bp)) {
11000 bnxt_dump_tx_sw_state(bnapi);
11001 bnxt_dump_rx_sw_state(bnapi);
11002 bnxt_dump_cp_sw_state(bnapi);
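/* Ask firmware to reset one RX ring group via HWRM_RING_RESET, issued
 * on the ring's completion ring and addressed by ring group ID.
 */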
11007 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11009 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11010 struct hwrm_ring_reset_input req = {0};
11011 struct bnxt_napi *bnapi = rxr->bnapi;
11012 struct bnxt_cp_ring_info *cpr;
11015 cpr = &bnapi->cp_ring;
11016 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11017 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11018 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11019 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11020 return hwrm_send_message_silent(bp, &req, sizeof(req),
11024 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11027 bnxt_dbg_dump_states(bp);
11028 if (netif_running(bp->dev)) {
11032 bnxt_close_nic(bp, false, false);
11033 bnxt_open_nic(bp, false, false);
11036 bnxt_close_nic(bp, true, false);
11037 rc = bnxt_open_nic(bp, true, false);
11038 bnxt_ulp_start(bp, rc);
11043 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11045 struct bnxt *bp = netdev_priv(dev);
11047 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11048 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11049 bnxt_queue_sp_work(bp);
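/* Periodic firmware health check driven from bnxt_timer(): watch the
 * heartbeat and reset-count registers and schedule the firmware
 * exception handler when the heartbeat stalls or the reset counter
 * changes unexpectedly.
 */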
11052 static void bnxt_fw_health_check(struct bnxt *bp)
11054 struct bnxt_fw_health *fw_health = bp->fw_health;
11057 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11060 if (fw_health->tmr_counter) {
11061 fw_health->tmr_counter--;
11065 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11066 if (val == fw_health->last_fw_heartbeat)
11069 fw_health->last_fw_heartbeat = val;
11071 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11072 if (val != fw_health->last_fw_reset_cnt)
11075 fw_health->tmr_counter = fw_health->tmr_multiplier;
11079 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11080 bnxt_queue_sp_work(bp);
11083 static void bnxt_timer(struct timer_list *t)
11085 struct bnxt *bp = from_timer(bp, t, timer);
11086 struct net_device *dev = bp->dev;
11088 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11091 if (atomic_read(&bp->intr_sem) != 0)
11092 goto bnxt_restart_timer;
11094 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11095 bnxt_fw_health_check(bp);
11097 if (bp->link_info.link_up && bp->stats_coal_ticks) {
11098 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11099 bnxt_queue_sp_work(bp);
11102 if (bnxt_tc_flower_enabled(bp)) {
11103 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11104 bnxt_queue_sp_work(bp);
11107 #ifdef CONFIG_RFS_ACCEL
11108 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11109 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11110 bnxt_queue_sp_work(bp);
11112 #endif /*CONFIG_RFS_ACCEL*/
11114 if (bp->link_info.phy_retry) {
11115 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11116 bp->link_info.phy_retry = false;
11117 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11119 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11120 bnxt_queue_sp_work(bp);
11124 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11125 netif_carrier_ok(dev)) {
11126 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11127 bnxt_queue_sp_work(bp);
11129 bnxt_restart_timer:
11130 mod_timer(&bp->timer, jiffies + bp->current_interval);
11133 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11135 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11136 * set. If the device is being closed, bnxt_close() may be holding
11137 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11138  * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11139  */
11140 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11141 	rtnl_lock();
11142 }
11144 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11146 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11147 	rtnl_unlock();
11148 }
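/* Illustrative sketch (not upstream bnxt code): an sp_task handler that
 * needs the rtnl lock brackets its work with the two helpers above, so a
 * concurrent bnxt_close() that holds rtnl while waiting for
 * BNXT_STATE_IN_SP_TASK to clear cannot deadlock against it. The handler
 * body below is a hypothetical placeholder.
 */
static inline void bnxt_sp_work_under_rtnl(struct bnxt *bp)
{
	bnxt_rtnl_lock_sp(bp);		/* clears IN_SP_TASK, then takes rtnl */
	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		netdev_info(bp->dev, "sp_task work under rtnl\n");
	bnxt_rtnl_unlock_sp(bp);	/* re-sets IN_SP_TASK, then drops rtnl */
}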
11150 /* Only called from bnxt_sp_task() */
11151 static void bnxt_reset(struct bnxt *bp, bool silent)
11153 bnxt_rtnl_lock_sp(bp);
11154 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11155 bnxt_reset_task(bp, silent);
11156 bnxt_rtnl_unlock_sp(bp);
11159 /* Only called from bnxt_sp_task() */
11160 static void bnxt_rx_ring_reset(struct bnxt *bp)
11164 bnxt_rtnl_lock_sp(bp);
11165 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11166 bnxt_rtnl_unlock_sp(bp);
11169 /* Disable and flush TPA before resetting the RX ring */
11170 if (bp->flags & BNXT_FLAG_TPA)
11171 bnxt_set_tpa(bp, false);
11172 for (i = 0; i < bp->rx_nr_rings; i++) {
11173 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11174 struct bnxt_cp_ring_info *cpr;
11177 if (!rxr->bnapi->in_reset)
11180 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11182 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11183 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11185 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11187 bnxt_reset_task(bp, true);
11190 bnxt_free_one_rx_ring_skbs(bp, i);
11192 rxr->rx_agg_prod = 0;
11193 rxr->rx_sw_agg_prod = 0;
11194 rxr->rx_next_cons = 0;
11195 rxr->bnapi->in_reset = false;
11196 bnxt_alloc_one_rx_ring(bp, i);
11197 cpr = &rxr->bnapi->cp_ring;
11198 cpr->sw_stats.rx.rx_resets++;
11199 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11200 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11201 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11203 if (bp->flags & BNXT_FLAG_TPA)
11204 bnxt_set_tpa(bp, true);
11205 bnxt_rtnl_unlock_sp(bp);
11208 static void bnxt_fw_reset_close(struct bnxt *bp)
11211 /* When firmware is in fatal state, quiesce device and disable
11212  * bus master to prevent any potential bad DMAs before freeing
11213  * kernel memory.
11214  */
11215 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11218 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11220 bp->fw_reset_min_dsecs = 0;
11221 bnxt_tx_disable(bp);
11222 bnxt_disable_napi(bp);
11223 bnxt_disable_int_sync(bp);
11225 bnxt_clear_int_mode(bp);
11226 pci_disable_device(bp->pdev);
11228 __bnxt_close_nic(bp, true, false);
11229 bnxt_vf_reps_free(bp);
11230 bnxt_clear_int_mode(bp);
11231 bnxt_hwrm_func_drv_unrgtr(bp);
11232 if (pci_is_enabled(bp->pdev))
11233 pci_disable_device(bp->pdev);
11234 bnxt_free_ctx_mem(bp);
11239 static bool is_bnxt_fw_ok(struct bnxt *bp)
11241 struct bnxt_fw_health *fw_health = bp->fw_health;
11242 bool no_heartbeat = false, has_reset = false;
11245 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11246 if (val == fw_health->last_fw_heartbeat)
11247 no_heartbeat = true;
11249 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11250 if (val != fw_health->last_fw_reset_cnt)
11253 if (!no_heartbeat && has_reset)
11259 /* rtnl_lock is acquired before calling this function */
11260 static void bnxt_force_fw_reset(struct bnxt *bp)
11262 struct bnxt_fw_health *fw_health = bp->fw_health;
11265 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11266 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11269 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11270 bnxt_fw_reset_close(bp);
11271 wait_dsecs = fw_health->master_func_wait_dsecs;
11272 if (fw_health->master) {
11273 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11275 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11277 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11278 wait_dsecs = fw_health->normal_func_wait_dsecs;
11279 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11282 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11283 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11284 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11287 void bnxt_fw_exception(struct bnxt *bp)
11289 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11290 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11291 bnxt_rtnl_lock_sp(bp);
11292 bnxt_force_fw_reset(bp);
11293 bnxt_rtnl_unlock_sp(bp);
11296 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11297  * 0 if none.
11298  */
11299 static int bnxt_get_registered_vfs(struct bnxt *bp)
11301 #ifdef CONFIG_BNXT_SRIOV
11307 rc = bnxt_hwrm_func_qcfg(bp);
11309 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11312 if (bp->pf.registered_vfs)
11313 return bp->pf.registered_vfs;
11320 void bnxt_fw_reset(struct bnxt *bp)
11322 bnxt_rtnl_lock_sp(bp);
11323 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11324 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11327 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11328 if (bp->pf.active_vfs &&
11329 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11330 n = bnxt_get_registered_vfs(bp);
11332 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11334 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11335 dev_close(bp->dev);
11336 goto fw_reset_exit;
11337 } else if (n > 0) {
11338 u16 vf_tmo_dsecs = n * 10;
11340 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11341 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11342 bp->fw_reset_state =
11343 BNXT_FW_RESET_STATE_POLL_VF;
11344 bnxt_queue_fw_reset_work(bp, HZ / 10);
11345 goto fw_reset_exit;
11347 bnxt_fw_reset_close(bp);
11348 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11349 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11352 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11353 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11355 bnxt_queue_fw_reset_work(bp, tmo);
11358 	bnxt_rtnl_unlock_sp(bp);
11359 }
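/* Worked example of the VF timeout scaling above (an illustrative helper,
 * not upstream code): each registered VF adds 10 deci-seconds (one second)
 * of polling budget, so n == 25 VFs yields 250 dsecs, i.e. 25 seconds,
 * which replaces fw_reset_max_dsecs only when it is larger.
 */
static inline u16 bnxt_vf_tmo_dsecs(int n_vfs)
{
	return n_vfs * 10;	/* 10 dsecs == 1 s per registered VF */
}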
11361 static void bnxt_chk_missed_irq(struct bnxt *bp)
11365 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11368 for (i = 0; i < bp->cp_nr_rings; i++) {
11369 struct bnxt_napi *bnapi = bp->bnapi[i];
11370 struct bnxt_cp_ring_info *cpr;
11377 cpr = &bnapi->cp_ring;
11378 for (j = 0; j < 2; j++) {
11379 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11382 if (!cpr2 || cpr2->has_more_work ||
11383 !bnxt_has_work(bp, cpr2))
11386 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11387 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11390 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11391 bnxt_dbg_hwrm_ring_info_get(bp,
11392 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11393 fw_ring_id, &val[0], &val[1]);
11394 cpr->sw_stats.cmn.missed_irqs++;
11399 static void bnxt_cfg_ntp_filters(struct bnxt *);
11401 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11403 struct bnxt_link_info *link_info = &bp->link_info;
11405 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11406 link_info->autoneg = BNXT_AUTONEG_SPEED;
11407 if (bp->hwrm_spec_code >= 0x10201) {
11408 if (link_info->auto_pause_setting &
11409 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11410 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11412 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11414 link_info->advertising = link_info->auto_link_speeds;
11415 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11417 link_info->req_link_speed = link_info->force_link_speed;
11418 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11419 if (link_info->force_pam4_link_speed) {
11420 link_info->req_link_speed =
11421 link_info->force_pam4_link_speed;
11422 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11424 link_info->req_duplex = link_info->duplex_setting;
11426 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11427 link_info->req_flow_ctrl =
11428 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11430 link_info->req_flow_ctrl = link_info->force_pause_setting;
11433 static void bnxt_fw_echo_reply(struct bnxt *bp)
11435 struct bnxt_fw_health *fw_health = bp->fw_health;
11436 struct hwrm_func_echo_response_input req = {0};
11438 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11439 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11440 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11441 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11444 static void bnxt_sp_task(struct work_struct *work)
11446 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11448 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11449 smp_mb__after_atomic();
11450 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11451 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11455 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11456 bnxt_cfg_rx_mode(bp);
11458 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11459 bnxt_cfg_ntp_filters(bp);
11460 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11461 bnxt_hwrm_exec_fwd_req(bp);
11462 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11463 bnxt_hwrm_port_qstats(bp, 0);
11464 bnxt_hwrm_port_qstats_ext(bp, 0);
11465 bnxt_accumulate_all_stats(bp);
11468 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11471 mutex_lock(&bp->link_lock);
11472 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11474 bnxt_hwrm_phy_qcaps(bp);
11476 rc = bnxt_update_link(bp, true);
11478 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11481 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11483 bnxt_init_ethtool_link_settings(bp);
11484 mutex_unlock(&bp->link_lock);
11486 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11489 mutex_lock(&bp->link_lock);
11490 rc = bnxt_update_phy_setting(bp);
11491 mutex_unlock(&bp->link_lock);
11493 netdev_warn(bp->dev, "update phy settings retry failed\n");
11495 bp->link_info.phy_retry = false;
11496 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11499 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11500 mutex_lock(&bp->link_lock);
11501 bnxt_get_port_module_status(bp);
11502 mutex_unlock(&bp->link_lock);
11505 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11506 bnxt_tc_flow_stats_work(bp);
11508 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11509 bnxt_chk_missed_irq(bp);
11511 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11512 bnxt_fw_echo_reply(bp);
11514 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11515  * must be the last functions to be called before exiting.
11516  */
11517 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11518 bnxt_reset(bp, false);
11520 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11521 bnxt_reset(bp, true);
11523 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11524 bnxt_rx_ring_reset(bp);
11526 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11527 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11529 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11530 if (!is_bnxt_fw_ok(bp))
11531 bnxt_devlink_health_report(bp,
11532 BNXT_FW_EXCEPTION_SP_EVENT);
11535 smp_mb__before_atomic();
11536 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11537 }
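/* Sketch of the barrier pairing used in bnxt_sp_task() (illustrative, and
 * assuming the close path mirrors it): the task orders "set IN_SP_TASK"
 * before reading STATE_OPEN, while a closer orders "clear STATE_OPEN"
 * before polling IN_SP_TASK, so at least one side must observe the other:
 */
static inline void bnxt_wait_sp_task_done(struct bnxt *bp)
{
	clear_bit(BNXT_STATE_OPEN, &bp->state);
	smp_mb__after_atomic();		/* pairs with the barriers above */
	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
		msleep(20);		/* hypothetical wait on the close side */
}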
11539 /* Under rtnl_lock */
11540 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11543 int max_rx, max_tx, tx_sets = 1;
11544 int tx_rings_needed, stats;
11551 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11558 tx_rings_needed = tx * tx_sets + tx_xdp;
11559 if (max_tx < tx_rings_needed)
11563 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11566 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11568 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11570 if (BNXT_NEW_RM(bp)) {
11571 cp += bnxt_get_ulp_msix_num(bp);
11572 stats += bnxt_get_ulp_stat_ctxs(bp);
11574 	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11575 				     stats, vnics);
11576 }
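/* Worked example of the completion-ring arithmetic above (illustrative
 * helper with assumed numbers, not upstream code): with shared rings
 * (sh == true), tx_rings_needed == 8 and rx == 4 give cp == max(8, 4) == 8;
 * without sharing, cp == 8 + 4 == 12.
 */
static inline int bnxt_example_cp_rings(bool sh, int tx_rings_needed, int rx)
{
	return sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
}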
11578 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11581 pci_iounmap(pdev, bp->bar2);
11586 pci_iounmap(pdev, bp->bar1);
11591 pci_iounmap(pdev, bp->bar0);
11596 static void bnxt_cleanup_pci(struct bnxt *bp)
11598 bnxt_unmap_bars(bp, bp->pdev);
11599 pci_release_regions(bp->pdev);
11600 if (pci_is_enabled(bp->pdev))
11601 pci_disable_device(bp->pdev);
11604 static void bnxt_init_dflt_coal(struct bnxt *bp)
11606 struct bnxt_coal *coal;
11608 	/* Tick values in microseconds.
11609 	 * 1 coal_buf x bufs_per_record = 1 completion record.
11610 	 */
11611 coal = &bp->rx_coal;
11612 coal->coal_ticks = 10;
11613 coal->coal_bufs = 30;
11614 coal->coal_ticks_irq = 1;
11615 coal->coal_bufs_irq = 2;
11616 coal->idle_thresh = 50;
11617 coal->bufs_per_record = 2;
11618 coal->budget = 64; /* NAPI budget */
11620 coal = &bp->tx_coal;
11621 coal->coal_ticks = 28;
11622 coal->coal_bufs = 30;
11623 coal->coal_ticks_irq = 2;
11624 coal->coal_bufs_irq = 2;
11625 coal->bufs_per_record = 1;
11627 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11628 }
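/* Sketch (illustrative, not upstream code): one completion record covers
 * bufs_per_record buffers, so the RX defaults above (coal_bufs == 30,
 * bufs_per_record == 2) coalesce about 15 completion records before an
 * interrupt fires, unless the 10 usec coal_ticks timer expires first.
 */
static inline u16 bnxt_coal_records(const struct bnxt_coal *coal)
{
	return coal->coal_bufs / coal->bufs_per_record;
}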
11630 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11635 rc = bnxt_hwrm_ver_get(bp);
11636 bnxt_try_map_fw_health_reg(bp);
11638 rc = bnxt_try_recover_fw(bp);
11641 rc = bnxt_hwrm_ver_get(bp);
11646 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11647 rc = bnxt_alloc_kong_hwrm_resources(bp);
11649 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11652 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11653 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11654 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11658 bnxt_nvm_cfg_ver_get(bp);
11660 rc = bnxt_hwrm_func_reset(bp);
11664 bnxt_hwrm_fw_set_time(bp);
11668 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11672 /* Get the MAX capabilities for this function */
11673 rc = bnxt_hwrm_func_qcaps(bp);
11675 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11680 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11682 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11685 if (bnxt_alloc_fw_health(bp)) {
11686 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11688 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11690 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11694 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11698 bnxt_hwrm_func_qcfg(bp);
11699 bnxt_hwrm_vnic_qcaps(bp);
11700 bnxt_hwrm_port_led_qcaps(bp);
11701 bnxt_ethtool_init(bp);
11706 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11708 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11709 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11710 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11711 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11712 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11713 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11714 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11715 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11716 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11720 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11722 struct net_device *dev = bp->dev;
11724 dev->hw_features &= ~NETIF_F_NTUPLE;
11725 dev->features &= ~NETIF_F_NTUPLE;
11726 bp->flags &= ~BNXT_FLAG_RFS;
11727 if (bnxt_rfs_supported(bp)) {
11728 dev->hw_features |= NETIF_F_NTUPLE;
11729 if (bnxt_rfs_capable(bp)) {
11730 bp->flags |= BNXT_FLAG_RFS;
11731 dev->features |= NETIF_F_NTUPLE;
11736 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11738 struct pci_dev *pdev = bp->pdev;
11740 bnxt_set_dflt_rss_hash_type(bp);
11741 bnxt_set_dflt_rfs(bp);
11743 bnxt_get_wol_settings(bp);
11744 if (bp->flags & BNXT_FLAG_WOL_CAP)
11745 device_set_wakeup_enable(&pdev->dev, bp->wol);
11747 device_set_wakeup_capable(&pdev->dev, false);
11749 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11750 bnxt_hwrm_coal_params_qcaps(bp);
11753 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11755 static int bnxt_fw_init_one(struct bnxt *bp)
11759 rc = bnxt_fw_init_one_p1(bp);
11761 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11764 rc = bnxt_fw_init_one_p2(bp);
11766 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11769 rc = bnxt_probe_phy(bp, false);
11772 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11776 /* In case fw capabilities have changed, destroy the unneeded
11777 	 * reporters and create newly capable ones.
11778 	 */
11779 bnxt_dl_fw_reporters_destroy(bp, false);
11780 bnxt_dl_fw_reporters_create(bp);
11781 bnxt_fw_init_one_p3(bp);
11785 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11787 struct bnxt_fw_health *fw_health = bp->fw_health;
11788 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11789 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11790 u32 reg_type, reg_off, delay_msecs;
11792 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11793 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11794 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11795 switch (reg_type) {
11796 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11797 pci_write_config_dword(bp->pdev, reg_off, val);
11799 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11800 writel(reg_off & BNXT_GRC_BASE_MASK,
11801 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11802 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11804 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11805 writel(val, bp->bar0 + reg_off);
11807 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11808 writel(val, bp->bar1 + reg_off);
11812 pci_read_config_dword(bp->pdev, 0, &val);
11813 	msleep(delay_msecs);
11814 }
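/* Sketch of the GRC window indirection used in the GRC case above
 * (illustrative; it assumes each window is 4 KB and that the window
 * programmed at BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4 is accessed at BAR0
 * offset 0x2000): the high bits of the GRC address select the window base
 * and the low bits index into the mapped page.
 */
static inline void __iomem *bnxt_grc_window_addr(struct bnxt *bp, u32 grc_addr)
{
	writel(grc_addr & BNXT_GRC_BASE_MASK,
	       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
	return bp->bar0 + (grc_addr & BNXT_GRC_OFFSET_MASK) + 0x2000;
}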
11817 static void bnxt_reset_all(struct bnxt *bp)
11819 struct bnxt_fw_health *fw_health = bp->fw_health;
11822 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11823 bnxt_fw_reset_via_optee(bp);
11824 bp->fw_reset_timestamp = jiffies;
11828 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11829 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11830 bnxt_fw_reset_writel(bp, i);
11831 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11832 struct hwrm_fw_reset_input req = {0};
11834 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11835 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11836 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11837 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11838 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11839 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11841 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11843 bp->fw_reset_timestamp = jiffies;
11846 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
11848 return time_after(jiffies, bp->fw_reset_timestamp +
11849 			  (bp->fw_reset_max_dsecs * HZ / 10));
11850 }
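/* Helper sketch (illustrative, not upstream code): firmware wait times in
 * this file are expressed in deci-seconds, so the conversion used above is
 * dsecs * HZ / 10. With HZ == 250, fw_reset_max_dsecs == 60 yields
 * 60 * 250 / 10 == 1500 jiffies, i.e. six seconds.
 */
static inline unsigned long bnxt_dsecs_to_jiffies(u16 dsecs)
{
	return (unsigned long)dsecs * HZ / 10;
}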
11852 static void bnxt_fw_reset_task(struct work_struct *work)
11854 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11857 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11858 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11862 switch (bp->fw_reset_state) {
11863 case BNXT_FW_RESET_STATE_POLL_VF: {
11864 int n = bnxt_get_registered_vfs(bp);
11868 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11869 n, jiffies_to_msecs(jiffies -
11870 bp->fw_reset_timestamp));
11871 goto fw_reset_abort;
11872 } else if (n > 0) {
11873 if (bnxt_fw_reset_timeout(bp)) {
11874 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11875 bp->fw_reset_state = 0;
11876 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11880 bnxt_queue_fw_reset_work(bp, HZ / 10);
11883 bp->fw_reset_timestamp = jiffies;
11885 bnxt_fw_reset_close(bp);
11886 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11887 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11890 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11891 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11894 bnxt_queue_fw_reset_work(bp, tmo);
11897 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
11900 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11901 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
11902 !bnxt_fw_reset_timeout(bp)) {
11903 bnxt_queue_fw_reset_work(bp, HZ / 5);
11907 if (!bp->fw_health->master) {
11908 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11910 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11911 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11914 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11917 case BNXT_FW_RESET_STATE_RESET_FW:
11918 bnxt_reset_all(bp);
11919 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11920 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11922 case BNXT_FW_RESET_STATE_ENABLE_DEV:
11923 bnxt_inv_fw_health_reg(bp);
11924 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
11925 !bp->fw_reset_min_dsecs) {
11928 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11929 if (val == 0xffff) {
11930 if (bnxt_fw_reset_timeout(bp)) {
11931 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
11932 goto fw_reset_abort;
11934 bnxt_queue_fw_reset_work(bp, HZ / 1000);
11938 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11939 if (pci_enable_device(bp->pdev)) {
11940 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11941 goto fw_reset_abort;
11943 pci_set_master(bp->pdev);
11944 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11946 case BNXT_FW_RESET_STATE_POLL_FW:
11947 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11948 rc = __bnxt_hwrm_ver_get(bp, true);
11950 if (bnxt_fw_reset_timeout(bp)) {
11951 netdev_err(bp->dev, "Firmware reset aborted\n");
11952 goto fw_reset_abort_status;
11954 bnxt_queue_fw_reset_work(bp, HZ / 5);
11957 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11958 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11960 case BNXT_FW_RESET_STATE_OPENING:
11961 while (!rtnl_trylock()) {
11962 bnxt_queue_fw_reset_work(bp, HZ / 10);
11965 rc = bnxt_open(bp->dev);
11967 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
11968 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11969 dev_close(bp->dev);
11972 bp->fw_reset_state = 0;
11973 /* Make sure fw_reset_state is 0 before clearing the flag */
11974 smp_mb__before_atomic();
11975 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11976 bnxt_ulp_start(bp, rc);
11978 bnxt_reenable_sriov(bp);
11979 bnxt_vf_reps_alloc(bp);
11980 bnxt_vf_reps_open(bp);
11981 bnxt_dl_health_recovery_done(bp);
11982 bnxt_dl_health_status_update(bp, true);
11988 fw_reset_abort_status:
11989 if (bp->fw_health->status_reliable ||
11990 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
11991 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11993 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
11996 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11997 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11998 bnxt_dl_health_status_update(bp, false);
11999 bp->fw_reset_state = 0;
12001 dev_close(bp->dev);
12005 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12008 struct bnxt *bp = netdev_priv(dev);
12010 SET_NETDEV_DEV(dev, &pdev->dev);
12012 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12013 rc = pci_enable_device(pdev);
12015 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12019 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12020 dev_err(&pdev->dev,
12021 "Cannot find PCI device base address, aborting\n");
12023 goto init_err_disable;
12026 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12028 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12029 goto init_err_disable;
12032 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12033 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12034 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12036 goto init_err_release;
12039 pci_set_master(pdev);
12044 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12045 	 * determines the BAR size.
12046 	 */
12047 bp->bar0 = pci_ioremap_bar(pdev, 0);
12049 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12051 goto init_err_release;
12054 bp->bar2 = pci_ioremap_bar(pdev, 4);
12056 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12058 goto init_err_release;
12061 pci_enable_pcie_error_reporting(pdev);
12063 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12064 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12066 spin_lock_init(&bp->ntp_fltr_lock);
12067 #if BITS_PER_LONG == 32
12068 spin_lock_init(&bp->db_lock);
12071 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12072 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12074 bnxt_init_dflt_coal(bp);
12076 timer_setup(&bp->timer, bnxt_timer, 0);
12077 bp->current_interval = BNXT_TIMER_INTERVAL;
12079 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12080 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12082 clear_bit(BNXT_STATE_OPEN, &bp->state);
12086 bnxt_unmap_bars(bp, pdev);
12087 pci_release_regions(pdev);
12090 pci_disable_device(pdev);
12096 /* rtnl_lock held */
12097 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12099 struct sockaddr *addr = p;
12100 struct bnxt *bp = netdev_priv(dev);
12103 if (!is_valid_ether_addr(addr->sa_data))
12104 return -EADDRNOTAVAIL;
12106 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12109 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12113 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12114 if (netif_running(dev)) {
12115 bnxt_close_nic(bp, false, false);
12116 rc = bnxt_open_nic(bp, false, false);
12122 /* rtnl_lock held */
12123 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12125 struct bnxt *bp = netdev_priv(dev);
12127 if (netif_running(dev))
12128 bnxt_close_nic(bp, true, false);
12130 dev->mtu = new_mtu;
12131 bnxt_set_ring_params(bp);
12133 if (netif_running(dev))
12134 return bnxt_open_nic(bp, true, false);
12139 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12141 struct bnxt *bp = netdev_priv(dev);
12145 if (tc > bp->max_tc) {
12146 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12151 if (netdev_get_num_tc(dev) == tc)
12154 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12157 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12158 sh, tc, bp->tx_nr_rings_xdp);
12162 /* Needs to close the device and do hw resource re-allocations */
12163 if (netif_running(bp->dev))
12164 bnxt_close_nic(bp, true, false);
12167 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12168 netdev_set_num_tc(dev, tc);
12170 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12171 netdev_reset_tc(dev);
12173 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12174 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12175 bp->tx_nr_rings + bp->rx_nr_rings;
12177 if (netif_running(bp->dev))
12178 return bnxt_open_nic(bp, true, false);
12183 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12186 struct bnxt *bp = cb_priv;
12188 if (!bnxt_tc_flower_enabled(bp) ||
12189 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12190 return -EOPNOTSUPP;
12193 case TC_SETUP_CLSFLOWER:
12194 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12196 return -EOPNOTSUPP;
12200 LIST_HEAD(bnxt_block_cb_list);
12202 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12205 struct bnxt *bp = netdev_priv(dev);
12208 case TC_SETUP_BLOCK:
12209 return flow_block_cb_setup_simple(type_data,
12210 &bnxt_block_cb_list,
12211 bnxt_setup_tc_block_cb,
12213 case TC_SETUP_QDISC_MQPRIO: {
12214 struct tc_mqprio_qopt *mqprio = type_data;
12216 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12218 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12221 return -EOPNOTSUPP;
12225 #ifdef CONFIG_RFS_ACCEL
12226 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12227 struct bnxt_ntuple_filter *f2)
12229 struct flow_keys *keys1 = &f1->fkeys;
12230 struct flow_keys *keys2 = &f2->fkeys;
12232 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12233 keys1->basic.ip_proto != keys2->basic.ip_proto)
12236 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12237 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12238 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12241 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12242 sizeof(keys1->addrs.v6addrs.src)) ||
12243 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12244 sizeof(keys1->addrs.v6addrs.dst)))
12248 if (keys1->ports.ports == keys2->ports.ports &&
12249 keys1->control.flags == keys2->control.flags &&
12250 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12251 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12257 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12258 u16 rxq_index, u32 flow_id)
12260 struct bnxt *bp = netdev_priv(dev);
12261 struct bnxt_ntuple_filter *fltr, *new_fltr;
12262 struct flow_keys *fkeys;
12263 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12264 int rc = 0, idx, bit_id, l2_idx = 0;
12265 struct hlist_head *head;
12268 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12269 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12272 netif_addr_lock_bh(dev);
12273 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12274 if (ether_addr_equal(eth->h_dest,
12275 vnic->uc_list + off)) {
12280 netif_addr_unlock_bh(dev);
12284 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12288 fkeys = &new_fltr->fkeys;
12289 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12290 rc = -EPROTONOSUPPORT;
12294 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12295 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12296 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12297 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12298 rc = -EPROTONOSUPPORT;
12301 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12302 bp->hwrm_spec_code < 0x10601) {
12303 rc = -EPROTONOSUPPORT;
12306 flags = fkeys->control.flags;
12307 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12308 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12309 rc = -EPROTONOSUPPORT;
12313 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12314 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12316 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12317 head = &bp->ntp_fltr_hash_tbl[idx];
12319 hlist_for_each_entry_rcu(fltr, head, hash) {
12320 if (bnxt_fltr_match(fltr, new_fltr)) {
12328 spin_lock_bh(&bp->ntp_fltr_lock);
12329 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12330 BNXT_NTP_FLTR_MAX_FLTR, 0);
12332 spin_unlock_bh(&bp->ntp_fltr_lock);
12337 new_fltr->sw_id = (u16)bit_id;
12338 new_fltr->flow_id = flow_id;
12339 new_fltr->l2_fltr_idx = l2_idx;
12340 new_fltr->rxq = rxq_index;
12341 hlist_add_head_rcu(&new_fltr->hash, head);
12342 bp->ntp_fltr_count++;
12343 spin_unlock_bh(&bp->ntp_fltr_lock);
12345 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12346 bnxt_queue_sp_work(bp);
12348 return new_fltr->sw_id;
12355 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12359 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12360 struct hlist_head *head;
12361 struct hlist_node *tmp;
12362 struct bnxt_ntuple_filter *fltr;
12365 head = &bp->ntp_fltr_hash_tbl[i];
12366 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12369 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12370 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12373 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12378 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12383 set_bit(BNXT_FLTR_VALID, &fltr->state);
12387 spin_lock_bh(&bp->ntp_fltr_lock);
12388 hlist_del_rcu(&fltr->hash);
12389 bp->ntp_fltr_count--;
12390 spin_unlock_bh(&bp->ntp_fltr_lock);
12392 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12397 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12398 netdev_info(bp->dev, "Receive PF driver unload event!\n");
12403 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12407 #endif /* CONFIG_RFS_ACCEL */
12409 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12411 struct bnxt *bp = netdev_priv(netdev);
12412 struct udp_tunnel_info ti;
12415 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12416 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12417 bp->vxlan_port = ti.port;
12418 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12420 bp->nge_port = ti.port;
12421 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12425 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12427 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12430 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12431 .sync_table = bnxt_udp_tunnel_sync,
12432 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12433 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12435 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12436 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12437 	},
12438 };
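/* Note on the table layout above: table 0 holds the single VXLAN port and
 * table 1 the single GENEVE port; bnxt_udp_tunnel_sync() receives the table
 * index and reads slot 0 of that table via udp_tunnel_nic_get_port().
 */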
12440 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12441 struct net_device *dev, u32 filter_mask,
12444 struct bnxt *bp = netdev_priv(dev);
12446 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12447 nlflags, filter_mask, NULL);
12450 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12451 u16 flags, struct netlink_ext_ack *extack)
12453 struct bnxt *bp = netdev_priv(dev);
12454 struct nlattr *attr, *br_spec;
12457 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12458 return -EOPNOTSUPP;
12460 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12464 nla_for_each_nested(attr, br_spec, rem) {
12467 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12470 if (nla_len(attr) < sizeof(mode))
12473 mode = nla_get_u16(attr);
12474 if (mode == bp->br_mode)
12477 rc = bnxt_hwrm_set_br_mode(bp, mode);
12479 bp->br_mode = mode;
12485 int bnxt_get_port_parent_id(struct net_device *dev,
12486 struct netdev_phys_item_id *ppid)
12488 struct bnxt *bp = netdev_priv(dev);
12490 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12491 return -EOPNOTSUPP;
12493 	/* The PF and its VF-reps only support the switchdev framework */
12494 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12495 return -EOPNOTSUPP;
12497 ppid->id_len = sizeof(bp->dsn);
12498 memcpy(ppid->id, bp->dsn, ppid->id_len);
12503 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12505 struct bnxt *bp = netdev_priv(dev);
12507 return &bp->dl_port;
12510 static const struct net_device_ops bnxt_netdev_ops = {
12511 .ndo_open = bnxt_open,
12512 .ndo_start_xmit = bnxt_start_xmit,
12513 .ndo_stop = bnxt_close,
12514 .ndo_get_stats64 = bnxt_get_stats64,
12515 .ndo_set_rx_mode = bnxt_set_rx_mode,
12516 .ndo_do_ioctl = bnxt_ioctl,
12517 .ndo_validate_addr = eth_validate_addr,
12518 .ndo_set_mac_address = bnxt_change_mac_addr,
12519 .ndo_change_mtu = bnxt_change_mtu,
12520 .ndo_fix_features = bnxt_fix_features,
12521 .ndo_set_features = bnxt_set_features,
12522 .ndo_features_check = bnxt_features_check,
12523 .ndo_tx_timeout = bnxt_tx_timeout,
12524 #ifdef CONFIG_BNXT_SRIOV
12525 .ndo_get_vf_config = bnxt_get_vf_config,
12526 .ndo_set_vf_mac = bnxt_set_vf_mac,
12527 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12528 .ndo_set_vf_rate = bnxt_set_vf_bw,
12529 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12530 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12531 .ndo_set_vf_trust = bnxt_set_vf_trust,
12533 .ndo_setup_tc = bnxt_setup_tc,
12534 #ifdef CONFIG_RFS_ACCEL
12535 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12537 .ndo_bpf = bnxt_xdp,
12538 .ndo_xdp_xmit = bnxt_xdp_xmit,
12539 .ndo_bridge_getlink = bnxt_bridge_getlink,
12540 .ndo_bridge_setlink = bnxt_bridge_setlink,
12541 .ndo_get_devlink_port = bnxt_get_devlink_port,
12544 static void bnxt_remove_one(struct pci_dev *pdev)
12546 struct net_device *dev = pci_get_drvdata(pdev);
12547 struct bnxt *bp = netdev_priv(dev);
12550 bnxt_sriov_disable(bp);
12553 devlink_port_type_clear(&bp->dl_port);
12554 pci_disable_pcie_error_reporting(pdev);
12555 unregister_netdev(dev);
12556 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12557 /* Flush any pending tasks */
12558 cancel_work_sync(&bp->sp_task);
12559 cancel_delayed_work_sync(&bp->fw_reset_task);
12562 bnxt_dl_fw_reporters_destroy(bp, true);
12563 bnxt_dl_unregister(bp);
12564 bnxt_shutdown_tc(bp);
12566 bnxt_clear_int_mode(bp);
12567 bnxt_hwrm_func_drv_unrgtr(bp);
12568 bnxt_free_hwrm_resources(bp);
12569 bnxt_free_hwrm_short_cmd_req(bp);
12570 bnxt_ethtool_free(bp);
12574 kfree(bp->fw_health);
12575 bp->fw_health = NULL;
12576 bnxt_cleanup_pci(bp);
12577 bnxt_free_ctx_mem(bp);
12580 kfree(bp->rss_indir_tbl);
12581 bp->rss_indir_tbl = NULL;
12582 bnxt_free_port_stats(bp);
12586 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12589 struct bnxt_link_info *link_info = &bp->link_info;
12592 rc = bnxt_hwrm_phy_qcaps(bp);
12594 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12598 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12599 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12601 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12605 rc = bnxt_update_link(bp, false);
12607 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12612 /* Older firmware does not have supported_auto_speeds, so assume
12613 	 * that all supported speeds can be autonegotiated.
12614 	 */
12615 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12616 link_info->support_auto_speeds = link_info->support_speeds;
12618 bnxt_init_ethtool_link_settings(bp);
12622 static int bnxt_get_max_irq(struct pci_dev *pdev)
12626 if (!pdev->msix_cap)
12629 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12630 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12631 }
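/* Example of the encoding read above (illustrative helper, not upstream
 * code): the MSI-X Table Size field in the Message Control word is stored
 * as N - 1, so a raw QSIZE value of 0x7f means 0x7f + 1 == 128 vectors.
 */
static inline int bnxt_msix_vectors_from_ctrl(u16 ctrl)
{
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}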
12633 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12636 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12637 int max_ring_grps = 0, max_irq;
12639 *max_tx = hw_resc->max_tx_rings;
12640 *max_rx = hw_resc->max_rx_rings;
12641 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12642 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12643 bnxt_get_ulp_msix_num(bp),
12644 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12645 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12646 *max_cp = min_t(int, *max_cp, max_irq);
12647 max_ring_grps = hw_resc->max_hw_ring_grps;
12648 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12652 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12654 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12655 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12656 /* On P5 chips, max_cp output param should be available NQs */
12659 *max_rx = min_t(int, *max_rx, max_ring_grps);
12662 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12666 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12669 if (!rx || !tx || !cp)
12672 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12675 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12680 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12681 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12682 /* Not enough rings, try disabling agg rings. */
12683 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12684 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12686 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12687 bp->flags |= BNXT_FLAG_AGG_RINGS;
12690 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12691 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12692 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12693 bnxt_set_ring_params(bp);
12696 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12697 int max_cp, max_stat, max_irq;
12699 /* Reserve minimum resources for RoCE */
12700 max_cp = bnxt_get_max_func_cp_rings(bp);
12701 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12702 max_irq = bnxt_get_max_func_irqs(bp);
12703 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12704 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12705 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12708 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12709 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12710 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12711 max_cp = min_t(int, max_cp, max_irq);
12712 max_cp = min_t(int, max_cp, max_stat);
12713 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12720 /* In initial default shared ring setting, each shared ring must have a
12721  * RX/TX ring pair.
12722  */
12723 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12725 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12726 bp->rx_nr_rings = bp->cp_nr_rings;
12727 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12728 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12729 }
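/* Worked example (illustrative): entering with tx_nr_rings_per_tc == 8 and
 * rx_nr_rings == 4, the trim above yields cp_nr_rings == 4 and pulls both
 * rx_nr_rings and tx_nr_rings_per_tc down to 4, so every shared completion
 * ring keeps exactly one RX/TX ring pair.
 */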
12731 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12733 int dflt_rings, max_rx_rings, max_tx_rings, rc;
12735 if (!bnxt_can_reserve_rings(bp))
12739 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12740 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12741 /* Reduce default rings on multi-port cards so that total default
12742 	 * rings do not exceed CPU count.
12743 	 */
12744 if (bp->port_count > 1) {
12746 max_t(int, num_online_cpus() / bp->port_count, 1);
12748 dflt_rings = min_t(int, dflt_rings, max_rings);
12750 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12753 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12754 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12756 bnxt_trim_dflt_sh_rings(bp);
12758 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12759 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12761 rc = __bnxt_reserve_rings(bp);
12763 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12764 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12766 bnxt_trim_dflt_sh_rings(bp);
12768 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12769 if (bnxt_need_reserve_rings(bp)) {
12770 rc = __bnxt_reserve_rings(bp);
12772 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12773 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12775 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12780 bp->tx_nr_rings = 0;
12781 bp->rx_nr_rings = 0;
12786 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12790 if (bp->tx_nr_rings)
12793 bnxt_ulp_irq_stop(bp);
12794 bnxt_clear_int_mode(bp);
12795 rc = bnxt_set_dflt_rings(bp, true);
12797 netdev_err(bp->dev, "Not enough rings available.\n");
12798 goto init_dflt_ring_err;
12800 rc = bnxt_init_int_mode(bp);
12802 goto init_dflt_ring_err;
12804 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12805 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12806 bp->flags |= BNXT_FLAG_RFS;
12807 bp->dev->features |= NETIF_F_NTUPLE;
12809 init_dflt_ring_err:
12810 bnxt_ulp_irq_restart(bp, rc);
12814 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12819 bnxt_hwrm_func_qcaps(bp);
12821 if (netif_running(bp->dev))
12822 __bnxt_close_nic(bp, true, false);
12824 bnxt_ulp_irq_stop(bp);
12825 bnxt_clear_int_mode(bp);
12826 rc = bnxt_init_int_mode(bp);
12827 bnxt_ulp_irq_restart(bp, rc);
12829 if (netif_running(bp->dev)) {
12831 dev_close(bp->dev);
12833 rc = bnxt_open_nic(bp, true, false);
12839 static int bnxt_init_mac_addr(struct bnxt *bp)
12844 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12846 #ifdef CONFIG_BNXT_SRIOV
12847 struct bnxt_vf_info *vf = &bp->vf;
12848 bool strict_approval = true;
12850 if (is_valid_ether_addr(vf->mac_addr)) {
12851 /* overwrite netdev dev_addr with admin VF MAC */
12852 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12853 		/* Older PF driver or firmware may not approve this
12854 		 * correctly.
12855 		 */
12856 strict_approval = false;
12858 eth_hw_addr_random(bp->dev);
12860 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12866 #define BNXT_VPD_LEN 512
12867 static void bnxt_vpd_read_info(struct bnxt *bp)
12869 struct pci_dev *pdev = bp->pdev;
12870 int i, len, pos, ro_size, size;
12874 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
12878 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
12879 if (vpd_size <= 0) {
12880 netdev_err(bp->dev, "Unable to read VPD\n");
12884 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
12886 netdev_err(bp->dev, "VPD READ-Only not found\n");
12890 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
12891 i += PCI_VPD_LRDT_TAG_SIZE;
12892 if (i + ro_size > vpd_size)
12895 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12896 PCI_VPD_RO_KEYWORD_PARTNO);
12900 len = pci_vpd_info_field_size(&vpd_data[pos]);
12901 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12902 if (len + pos > vpd_size)
12905 size = min(len, BNXT_VPD_FLD_LEN - 1);
12906 memcpy(bp->board_partno, &vpd_data[pos], size);
12909 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12910 PCI_VPD_RO_KEYWORD_SERIALNO);
12914 len = pci_vpd_info_field_size(&vpd_data[pos]);
12915 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12916 if (len + pos > vpd_size)
12919 size = min(len, BNXT_VPD_FLD_LEN - 1);
12920 memcpy(bp->board_serialno, &vpd_data[pos], size);
12925 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12927 struct pci_dev *pdev = bp->pdev;
12930 qword = pci_get_dsn(pdev);
12932 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12933 return -EOPNOTSUPP;
12936 put_unaligned_le64(qword, dsn);
12938 bp->flags |= BNXT_FLAG_DSN_VALID;
12942 static int bnxt_map_db_bar(struct bnxt *bp)
12946 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12952 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12954 struct net_device *dev;
12958 if (pci_is_bridge(pdev))
12961 	/* Clear any pending DMA transactions left over from the crash kernel
12962 	 * while loading the driver in the capture kernel.
12963 	 */
12964 if (is_kdump_kernel()) {
12965 pci_clear_master(pdev);
12969 max_irqs = bnxt_get_max_irq(pdev);
12970 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12974 bp = netdev_priv(dev);
12975 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
12976 bnxt_set_max_func_irqs(bp, max_irqs);
12978 if (bnxt_vf_pciid(ent->driver_data))
12979 bp->flags |= BNXT_FLAG_VF;
12981 if (pdev->msix_cap)
12982 bp->flags |= BNXT_FLAG_MSIX_CAP;
12984 rc = bnxt_init_board(pdev, dev);
12986 goto init_err_free;
12988 dev->netdev_ops = &bnxt_netdev_ops;
12989 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
12990 dev->ethtool_ops = &bnxt_ethtool_ops;
12991 pci_set_drvdata(pdev, dev);
12993 rc = bnxt_alloc_hwrm_resources(bp);
12995 goto init_err_pci_clean;
12997 mutex_init(&bp->hwrm_cmd_lock);
12998 mutex_init(&bp->link_lock);
13000 rc = bnxt_fw_init_one_p1(bp);
13002 goto init_err_pci_clean;
13005 bnxt_vpd_read_info(bp);
13007 if (BNXT_CHIP_P5(bp)) {
13008 bp->flags |= BNXT_FLAG_CHIP_P5;
13009 if (BNXT_CHIP_SR2(bp))
13010 bp->flags |= BNXT_FLAG_CHIP_SR2;
13013 rc = bnxt_alloc_rss_indir_tbl(bp);
13015 goto init_err_pci_clean;
13017 rc = bnxt_fw_init_one_p2(bp);
13019 goto init_err_pci_clean;
13021 rc = bnxt_map_db_bar(bp);
13023 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13025 goto init_err_pci_clean;
13028 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13029 NETIF_F_TSO | NETIF_F_TSO6 |
13030 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13031 NETIF_F_GSO_IPXIP4 |
13032 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13033 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13034 NETIF_F_RXCSUM | NETIF_F_GRO;
13036 if (BNXT_SUPPORTS_TPA(bp))
13037 dev->hw_features |= NETIF_F_LRO;
13039 dev->hw_enc_features =
13040 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13041 NETIF_F_TSO | NETIF_F_TSO6 |
13042 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13043 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13044 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13045 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13047 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13048 NETIF_F_GSO_GRE_CSUM;
13049 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13050 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13051 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13052 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13053 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13054 if (BNXT_SUPPORTS_TPA(bp))
13055 dev->hw_features |= NETIF_F_GRO_HW;
13056 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13057 if (dev->features & NETIF_F_GRO_HW)
13058 dev->features &= ~NETIF_F_LRO;
13059 dev->priv_flags |= IFF_UNICAST_FLT;
13061 #ifdef CONFIG_BNXT_SRIOV
13062 init_waitqueue_head(&bp->sriov_cfg_wait);
13063 mutex_init(&bp->sriov_lock);
13065 if (BNXT_SUPPORTS_TPA(bp)) {
13066 bp->gro_func = bnxt_gro_func_5730x;
13067 if (BNXT_CHIP_P4(bp))
13068 bp->gro_func = bnxt_gro_func_5731x;
13069 else if (BNXT_CHIP_P5(bp))
13070 bp->gro_func = bnxt_gro_func_5750x;
13072 if (!BNXT_CHIP_P4_PLUS(bp))
13073 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13075 rc = bnxt_init_mac_addr(bp);
13077 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13078 rc = -EADDRNOTAVAIL;
13079 goto init_err_pci_clean;
13083 /* Read the adapter's DSN to use as the eswitch switch_id */
13084 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13087 /* MTU range: 60 - FW defined max */
13088 dev->min_mtu = ETH_ZLEN;
13089 dev->max_mtu = bp->max_mtu;
13091 rc = bnxt_probe_phy(bp, true);
13093 goto init_err_pci_clean;
13095 bnxt_set_rx_skb_mode(bp, false);
13096 bnxt_set_tpa_flags(bp);
13097 bnxt_set_ring_params(bp);
13098 rc = bnxt_set_dflt_rings(bp, true);
13100 netdev_err(bp->dev, "Not enough rings available.\n");
13102 goto init_err_pci_clean;
13105 bnxt_fw_init_one_p3(bp);
13107 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13108 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13110 rc = bnxt_init_int_mode(bp);
13112 goto init_err_pci_clean;
13114 /* No TC has been set yet and rings may have been trimmed due to
13115 	 * limited MSIX, so we re-initialize the TX rings per TC.
13116 	 */
13117 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13122 create_singlethread_workqueue("bnxt_pf_wq");
13124 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13126 goto init_err_pci_clean;
13129 rc = bnxt_init_tc(bp);
13131 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13135 bnxt_inv_fw_health_reg(bp);
13136 bnxt_dl_register(bp);
13138 rc = register_netdev(dev);
13140 goto init_err_cleanup;
13143 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13144 bnxt_dl_fw_reporters_create(bp);
13146 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13147 board_info[ent->driver_data].name,
13148 (long)pci_resource_start(pdev, 0), dev->dev_addr);
13149 pcie_print_link_status(pdev);
13151 pci_save_state(pdev);
13155 bnxt_dl_unregister(bp);
13156 bnxt_shutdown_tc(bp);
13157 bnxt_clear_int_mode(bp);
13159 init_err_pci_clean:
13160 bnxt_hwrm_func_drv_unrgtr(bp);
13161 bnxt_free_hwrm_short_cmd_req(bp);
13162 bnxt_free_hwrm_resources(bp);
13163 bnxt_ethtool_free(bp);
13164 kfree(bp->fw_health);
13165 bp->fw_health = NULL;
13166 bnxt_cleanup_pci(bp);
13167 bnxt_free_ctx_mem(bp);
13170 kfree(bp->rss_indir_tbl);
13171 bp->rss_indir_tbl = NULL;
13178 static void bnxt_shutdown(struct pci_dev *pdev)
13180 struct net_device *dev = pci_get_drvdata(pdev);
13187 bp = netdev_priv(dev);
13189 goto shutdown_exit;
13191 if (netif_running(dev))
13194 bnxt_ulp_shutdown(bp);
13195 bnxt_clear_int_mode(bp);
13196 pci_disable_device(pdev);
13198 if (system_state == SYSTEM_POWER_OFF) {
13199 pci_wake_from_d3(pdev, bp->wol);
13200 pci_set_power_state(pdev, PCI_D3hot);
13207 #ifdef CONFIG_PM_SLEEP
13208 static int bnxt_suspend(struct device *device)
13210 struct net_device *dev = dev_get_drvdata(device);
13211 struct bnxt *bp = netdev_priv(dev);
13216 if (netif_running(dev)) {
13217 netif_device_detach(dev);
13218 rc = bnxt_close(dev);
13220 bnxt_hwrm_func_drv_unrgtr(bp);
13221 pci_disable_device(bp->pdev);
13222 bnxt_free_ctx_mem(bp);
13229 static int bnxt_resume(struct device *device)
13231 struct net_device *dev = dev_get_drvdata(device);
13232 struct bnxt *bp = netdev_priv(dev);
13236 rc = pci_enable_device(bp->pdev);
13238 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13242 pci_set_master(bp->pdev);
13243 if (bnxt_hwrm_ver_get(bp)) {
13247 rc = bnxt_hwrm_func_reset(bp);
13253 rc = bnxt_hwrm_func_qcaps(bp);
13257 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13262 bnxt_get_wol_settings(bp);
13263 if (netif_running(dev)) {
13264 rc = bnxt_open(dev);
13266 netif_device_attach(dev);
13270 bnxt_ulp_start(bp, rc);
13272 bnxt_reenable_sriov(bp);
13277 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13278 #define BNXT_PM_OPS (&bnxt_pm_ops)
13282 #define BNXT_PM_OPS NULL
13284 #endif /* CONFIG_PM_SLEEP */
13287 * bnxt_io_error_detected - called when PCI error is detected
13288 * @pdev: Pointer to PCI device
13289 * @state: The current pci connection state
13291 * This function is called after a PCI bus error affecting
13292 * this device has been detected.
13294 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13295 pci_channel_state_t state)
13297 struct net_device *netdev = pci_get_drvdata(pdev);
13298 struct bnxt *bp = netdev_priv(netdev);
13300 netdev_info(netdev, "PCI I/O error detected\n");
13303 netif_device_detach(netdev);
13307 if (state == pci_channel_io_perm_failure) {
13309 return PCI_ERS_RESULT_DISCONNECT;
13312 if (state == pci_channel_io_frozen)
13313 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13315 if (netif_running(netdev))
13316 bnxt_close(netdev);
13318 pci_disable_device(pdev);
13319 bnxt_free_ctx_mem(bp);
13324 	/* Request a slot reset. */
13325 return PCI_ERS_RESULT_NEED_RESET;
13329 * bnxt_io_slot_reset - called after the pci bus has been reset.
13330 * @pdev: Pointer to PCI device
13332  * Restart the card from scratch, as if from a cold boot.
13333  * At this point, the card has experienced a hard reset,
13334  * followed by fixups by BIOS, and has its config space
13335  * set up identically to what it was at cold boot.
13336  */
13337 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13339 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13340 struct net_device *netdev = pci_get_drvdata(pdev);
13341 struct bnxt *bp = netdev_priv(netdev);
13344 netdev_info(bp->dev, "PCI Slot Reset\n");
13348 if (pci_enable_device(pdev)) {
13349 dev_err(&pdev->dev,
13350 "Cannot re-enable PCI device after reset.\n");
13352 pci_set_master(pdev);
13353 	/* Upon fatal error, the device's internal logic that latches the
13354 	 * BAR values is reset and is restored only when the BARs are
13355 	 * rewritten.
13356 	 *
13357 	 * Since pci_restore_state() does not rewrite a BAR whose value
13358 	 * matches the earlier saved value, the driver must write the BARs
13359 	 * to 0 to force a restore after a fatal error.
13360 	 */
13361 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13363 for (off = PCI_BASE_ADDRESS_0;
13364 off <= PCI_BASE_ADDRESS_5; off += 4)
13365 pci_write_config_dword(bp->pdev, off, 0);
13367 pci_restore_state(pdev);
13368 pci_save_state(pdev);
13370 err = bnxt_hwrm_func_reset(bp);
13372 result = PCI_ERS_RESULT_RECOVERED;
13381 * bnxt_io_resume - called when traffic can start flowing again.
13382 * @pdev: Pointer to PCI device
13384 * This callback is called when the error recovery driver tells
13385  * us that it's OK to resume normal operation.
13387 static void bnxt_io_resume(struct pci_dev *pdev)
13389 struct net_device *netdev = pci_get_drvdata(pdev);
13390 struct bnxt *bp = netdev_priv(netdev);
13393 netdev_info(bp->dev, "PCI Slot Resume\n");
13396 err = bnxt_hwrm_func_qcaps(bp);
13397 if (!err && netif_running(netdev))
13398 err = bnxt_open(netdev);
13400 bnxt_ulp_start(bp, err);
13402 bnxt_reenable_sriov(bp);
13403 netif_device_attach(netdev);
13409 static const struct pci_error_handlers bnxt_err_handler = {
13410 .error_detected = bnxt_io_error_detected,
13411 .slot_reset = bnxt_io_slot_reset,
13412 .resume = bnxt_io_resume
13415 static struct pci_driver bnxt_pci_driver = {
13416 .name = DRV_MODULE_NAME,
13417 .id_table = bnxt_pci_tbl,
13418 .probe = bnxt_init_one,
13419 .remove = bnxt_remove_one,
13420 .shutdown = bnxt_shutdown,
13421 .driver.pm = BNXT_PM_OPS,
13422 .err_handler = &bnxt_err_handler,
13423 #if defined(CONFIG_BNXT_SRIOV)
13424 .sriov_configure = bnxt_sriov_configure,
13428 static int __init bnxt_init(void)
13431 return pci_register_driver(&bnxt_pci_driver);
13434 static void __exit bnxt_exit(void)
13436 pci_unregister_driver(&bnxt_pci_driver);
13438 destroy_workqueue(bnxt_pf_wq);
13442 module_init(bnxt_init);
13443 module_exit(bnxt_exit);