1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/module.h>
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/bitmap.h>
53 #include <linux/cpu_rmap.h>
54 #include <linux/cpumask.h>
55 #include <net/pkt_cls.h>
56 #include <linux/hwmon.h>
57 #include <linux/hwmon-sysfs.h>
58 #include <net/page_pool.h>
62 #include "bnxt_hwrm.h"
64 #include "bnxt_sriov.h"
65 #include "bnxt_ethtool.h"
71 #include "bnxt_devlink.h"
72 #include "bnxt_debugfs.h"
74 #define BNXT_TX_TIMEOUT (5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
78 MODULE_LICENSE("GPL");
79 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
83 #define BNXT_RX_COPY_THRESH 256
85 #define BNXT_TX_PUSH_THRESH 164
87 /* indexed by enum board_idx */
91 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
124 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
125 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
126 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
127 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
128 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
129 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
130 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
131 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
132 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
133 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
134 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
135 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
138 static const struct pci_device_id bnxt_pci_tbl[] = {
139 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
140 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
141 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
142 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
143 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
144 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
145 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
146 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
147 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
148 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
149 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
150 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
151 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
152 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
153 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
154 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
155 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
156 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
157 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
158 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
159 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
160 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
161 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
162 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
163 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
164 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
165 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
166 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
167 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
168 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
171 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
173 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
174 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
175 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
176 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
177 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
183 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
184 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
185 #ifdef CONFIG_BNXT_SRIOV
186 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
187 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
188 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
189 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
190 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
191 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
192 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
193 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
194 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
195 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
196 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
197 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
198 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
199 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
200 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
201 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
203 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
204 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
206 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
211 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
213 static const u16 bnxt_vf_req_snif[] = {
217 HWRM_CFA_L2_FILTER_ALLOC,
220 static const u16 bnxt_async_events_arr[] = {
221 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
222 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
223 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
224 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
225 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
226 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
227 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
228 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
229 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
230 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
231 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
232 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
233 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
234 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
235 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
236 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
239 static struct workqueue_struct *bnxt_pf_wq;
241 static bool bnxt_vf_pciid(enum board_idx idx)
243 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
244 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
245 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
246 idx == NETXTREME_E_P5_VF_HV);
249 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
250 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
251 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
253 #define BNXT_CP_DB_IRQ_DIS(db) \
254 writel(DB_CP_IRQ_DIS_FLAGS, db)
256 #define BNXT_DB_CQ(db, idx) \
257 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
259 #define BNXT_DB_NQ_P5(db, idx) \
260 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
263 #define BNXT_DB_CQ_ARM(db, idx) \
264 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
266 #define BNXT_DB_NQ_ARM_P5(db, idx) \
267 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
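/* Doorbell helpers: P5 chips write 64-bit NQ/CQ doorbells via bnxt_writeq(),
 * while older chips use the 32-bit completion ring doorbell formats above.
 */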
270 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
272 if (bp->flags & BNXT_FLAG_CHIP_P5)
273 BNXT_DB_NQ_P5(db, idx);
278 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
280 if (bp->flags & BNXT_FLAG_CHIP_P5)
281 BNXT_DB_NQ_ARM_P5(db, idx);
283 BNXT_DB_CQ_ARM(db, idx);
286 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
288 if (bp->flags & BNXT_FLAG_CHIP_P5)
289 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
290 RING_CMP(idx), db->doorbell);
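/* TX length hint for the first TX BD, indexed by the packet length in
 * 512-byte units; packets of 2KB and larger all use the last hint value.
 */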
295 const u16 bnxt_lhint_arr[] = {
296 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
297 TX_BD_FLAGS_LHINT_512_TO_1023,
298 TX_BD_FLAGS_LHINT_1024_TO_2047,
299 TX_BD_FLAGS_LHINT_1024_TO_2047,
300 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
301 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
302 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
303 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
304 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
305 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
306 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
307 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
308 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
309 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
310 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
311 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
312 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
313 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
314 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
317 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
319 struct metadata_dst *md_dst = skb_metadata_dst(skb);
321 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
324 return md_dst->u.port_info.port_id;
327 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
330 bnxt_db_write(bp, &txr->tx_db, prod);
331 txr->kick_pending = 0;
334 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
335 struct bnxt_tx_ring_info *txr,
336 struct netdev_queue *txq)
338 netif_tx_stop_queue(txq);
340 /* netif_tx_stop_queue() must be done before checking
341 * tx index in bnxt_tx_avail() below, because in
342 * bnxt_tx_int(), we update tx index before checking for
343 * netif_tx_queue_stopped().
346 if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
347 netif_tx_wake_queue(txq);
354 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
356 struct bnxt *bp = netdev_priv(dev);
358 struct tx_bd_ext *txbd1;
359 struct netdev_queue *txq;
362 unsigned int length, pad = 0;
363 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
365 struct pci_dev *pdev = bp->pdev;
366 struct bnxt_tx_ring_info *txr;
367 struct bnxt_sw_tx_bd *tx_buf;
370 i = skb_get_queue_mapping(skb);
371 if (unlikely(i >= bp->tx_nr_rings)) {
372 dev_kfree_skb_any(skb);
373 dev_core_stats_tx_dropped_inc(dev);
377 txq = netdev_get_tx_queue(dev, i);
378 txr = &bp->tx_ring[bp->tx_ring_map[i]];
381 free_size = bnxt_tx_avail(bp, txr);
382 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
383 /* We must have raced with NAPI cleanup */
384 if (net_ratelimit() && txr->kick_pending)
385 netif_warn(bp, tx_err, dev,
386 "bnxt: ring busy w/ flush pending!\n");
387 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
388 return NETDEV_TX_BUSY;
392 len = skb_headlen(skb);
393 last_frag = skb_shinfo(skb)->nr_frags;
395 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
397 txbd->tx_bd_opaque = prod;
399 tx_buf = &txr->tx_buf_ring[prod];
401 tx_buf->nr_frags = last_frag;
404 cfa_action = bnxt_xmit_get_cfa_action(skb);
405 if (skb_vlan_tag_present(skb)) {
406 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
407 skb_vlan_tag_get(skb);
408 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
409 * QinQ1, QinQ2 and QinQ3 VLAN headers are deprecated
411 if (skb->vlan_proto == htons(ETH_P_8021Q))
412 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
415 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
416 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
418 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
419 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
420 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
423 ptp->tx_hdr_off += VLAN_HLEN;
424 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
425 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
427 atomic_inc(&bp->ptp_cfg->tx_avail);
432 if (unlikely(skb->no_fcs))
433 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
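/* Push mode: if the ring is empty and the packet is small enough, copy the
 * BDs and packet data directly through the doorbell BAR instead of letting
 * the NIC DMA them from host memory.
 */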
435 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
437 struct tx_push_buffer *tx_push_buf = txr->tx_push;
438 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
439 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
440 void __iomem *db = txr->tx_db.doorbell;
441 void *pdata = tx_push_buf->data;
445 /* Set COAL_NOW to be ready quickly for the next push */
446 tx_push->tx_bd_len_flags_type =
447 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
448 TX_BD_TYPE_LONG_TX_BD |
449 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
450 TX_BD_FLAGS_COAL_NOW |
451 TX_BD_FLAGS_PACKET_END |
452 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
454 if (skb->ip_summed == CHECKSUM_PARTIAL)
455 tx_push1->tx_bd_hsize_lflags =
456 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
458 tx_push1->tx_bd_hsize_lflags = 0;
460 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
461 tx_push1->tx_bd_cfa_action =
462 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
464 end = pdata + length;
465 end = PTR_ALIGN(end, 8) - 1;
468 skb_copy_from_linear_data(skb, pdata, len);
470 for (j = 0; j < last_frag; j++) {
471 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
474 fptr = skb_frag_address_safe(frag);
478 memcpy(pdata, fptr, skb_frag_size(frag));
479 pdata += skb_frag_size(frag);
482 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
483 txbd->tx_bd_haddr = txr->data_mapping;
484 prod = NEXT_TX(prod);
485 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
486 memcpy(txbd, tx_push1, sizeof(*txbd));
487 prod = NEXT_TX(prod);
489 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
493 netdev_tx_sent_queue(txq, skb->len);
494 wmb(); /* Sync is_push and byte queue before pushing data */
496 push_len = (length + sizeof(*tx_push) + 7) / 8;
498 __iowrite64_copy(db, tx_push_buf, 16);
499 __iowrite32_copy(db + 4, tx_push_buf + 1,
500 (push_len - 16) << 1);
502 __iowrite64_copy(db, tx_push_buf, push_len);
509 if (length < BNXT_MIN_PKT_SIZE) {
510 pad = BNXT_MIN_PKT_SIZE - length;
511 if (skb_pad(skb, pad))
512 /* SKB already freed. */
513 goto tx_kick_pending;
514 length = BNXT_MIN_PKT_SIZE;
517 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
519 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
522 dma_unmap_addr_set(tx_buf, mapping, mapping);
523 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
524 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
526 txbd->tx_bd_haddr = cpu_to_le64(mapping);
528 prod = NEXT_TX(prod);
529 txbd1 = (struct tx_bd_ext *)
530 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
532 txbd1->tx_bd_hsize_lflags = lflags;
533 if (skb_is_gso(skb)) {
536 if (skb->encapsulation)
537 hdr_len = skb_inner_network_offset(skb) +
538 skb_inner_network_header_len(skb) +
539 inner_tcp_hdrlen(skb);
541 hdr_len = skb_transport_offset(skb) +
544 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
546 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
547 length = skb_shinfo(skb)->gso_size;
548 txbd1->tx_bd_mss = cpu_to_le32(length);
550 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
551 txbd1->tx_bd_hsize_lflags |=
552 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
553 txbd1->tx_bd_mss = 0;
557 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
558 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
563 flags |= bnxt_lhint_arr[length];
564 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
566 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
567 txbd1->tx_bd_cfa_action =
568 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
569 for (i = 0; i < last_frag; i++) {
570 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
572 prod = NEXT_TX(prod);
573 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
575 len = skb_frag_size(frag);
576 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
579 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
582 tx_buf = &txr->tx_buf_ring[prod];
583 dma_unmap_addr_set(tx_buf, mapping, mapping);
585 txbd->tx_bd_haddr = cpu_to_le64(mapping);
587 flags = len << TX_BD_LEN_SHIFT;
588 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
592 txbd->tx_bd_len_flags_type =
593 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
594 TX_BD_FLAGS_PACKET_END);
596 netdev_tx_sent_queue(txq, skb->len);
598 skb_tx_timestamp(skb);
600 /* Sync BD data before updating doorbell */
603 prod = NEXT_TX(prod);
606 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
607 bnxt_txr_db_kick(bp, txr, prod);
609 txr->kick_pending = 1;
613 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
614 if (netdev_xmit_more() && !tx_buf->is_push)
615 bnxt_txr_db_kick(bp, txr, prod);
617 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
622 if (BNXT_TX_PTP_IS_SET(lflags))
623 atomic_inc(&bp->ptp_cfg->tx_avail);
627 /* start back at the beginning and unmap the skb */
629 tx_buf = &txr->tx_buf_ring[prod];
630 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
631 skb_headlen(skb), DMA_TO_DEVICE);
632 prod = NEXT_TX(prod);
634 /* unmap remaining mapped pages */
635 for (i = 0; i < last_frag; i++) {
636 prod = NEXT_TX(prod);
637 tx_buf = &txr->tx_buf_ring[prod];
638 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
639 skb_frag_size(&skb_shinfo(skb)->frags[i]),
644 dev_kfree_skb_any(skb);
646 if (txr->kick_pending)
647 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
648 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
649 dev_core_stats_tx_dropped_inc(dev);
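/* TX completion path: unmap and free up to nr_pkts completed packets,
 * update the BQL byte count, and re-wake the TX queue once enough
 * descriptors are free again.
 */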
653 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
655 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
656 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
657 u16 cons = txr->tx_cons;
658 struct pci_dev *pdev = bp->pdev;
660 unsigned int tx_bytes = 0;
662 for (i = 0; i < nr_pkts; i++) {
663 struct bnxt_sw_tx_bd *tx_buf;
664 bool compl_deferred = false;
668 tx_buf = &txr->tx_buf_ring[cons];
669 cons = NEXT_TX(cons);
673 if (tx_buf->is_push) {
678 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
679 skb_headlen(skb), DMA_TO_DEVICE);
680 last = tx_buf->nr_frags;
682 for (j = 0; j < last; j++) {
683 cons = NEXT_TX(cons);
684 tx_buf = &txr->tx_buf_ring[cons];
687 dma_unmap_addr(tx_buf, mapping),
688 skb_frag_size(&skb_shinfo(skb)->frags[j]),
691 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
692 if (bp->flags & BNXT_FLAG_CHIP_P5) {
693 if (!bnxt_get_tx_ts_p5(bp, skb))
694 compl_deferred = true;
696 atomic_inc(&bp->ptp_cfg->tx_avail);
701 cons = NEXT_TX(cons);
703 tx_bytes += skb->len;
705 dev_kfree_skb_any(skb);
708 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
711 /* Need to make the tx_cons update visible to bnxt_start_xmit()
712 * before checking for netif_tx_queue_stopped(). Without the
713 * memory barrier, there is a small possibility that bnxt_start_xmit()
714 * will miss it and cause the queue to be stopped forever.
718 if (unlikely(netif_tx_queue_stopped(txq)) &&
719 bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
720 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
721 netif_tx_wake_queue(txq);
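/* Allocate an RX page from the ring's page_pool and DMA-map it; on mapping
 * failure the page is recycled back to the pool.
 */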
724 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
725 struct bnxt_rx_ring_info *rxr,
728 struct device *dev = &bp->pdev->dev;
731 page = page_pool_dev_alloc_pages(rxr->page_pool);
735 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
736 DMA_ATTR_WEAK_ORDERING);
737 if (dma_mapping_error(dev, *mapping)) {
738 page_pool_recycle_direct(rxr->page_pool, page);
741 *mapping += bp->rx_dma_offset;
745 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
749 struct pci_dev *pdev = bp->pdev;
751 if (gfp == GFP_ATOMIC)
752 data = napi_alloc_frag(bp->rx_buf_size);
754 data = netdev_alloc_frag(bp->rx_buf_size);
758 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
759 bp->rx_buf_use_size, bp->rx_dir,
760 DMA_ATTR_WEAK_ORDERING);
762 if (dma_mapping_error(&pdev->dev, *mapping)) {
769 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
772 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
773 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
776 if (BNXT_RX_PAGE_MODE(bp)) {
778 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
784 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
786 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
792 rx_buf->data_ptr = data + bp->rx_offset;
794 rx_buf->mapping = mapping;
796 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
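/* Recycle an RX buffer: move the buffer at the consumer index back onto the
 * current producer slot so it can be handed to the hardware again.
 */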
800 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
802 u16 prod = rxr->rx_prod;
803 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
804 struct rx_bd *cons_bd, *prod_bd;
806 prod_rx_buf = &rxr->rx_buf_ring[prod];
807 cons_rx_buf = &rxr->rx_buf_ring[cons];
809 prod_rx_buf->data = data;
810 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
812 prod_rx_buf->mapping = cons_rx_buf->mapping;
814 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
815 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
817 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
820 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
822 u16 next, max = rxr->rx_agg_bmap_size;
824 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
826 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
830 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
831 struct bnxt_rx_ring_info *rxr,
835 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
836 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
837 struct pci_dev *pdev = bp->pdev;
840 u16 sw_prod = rxr->rx_sw_agg_prod;
841 unsigned int offset = 0;
843 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
846 page = alloc_page(gfp);
850 rxr->rx_page_offset = 0;
852 offset = rxr->rx_page_offset;
853 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
854 if (rxr->rx_page_offset == PAGE_SIZE)
859 page = alloc_page(gfp);
864 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
865 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
866 DMA_ATTR_WEAK_ORDERING);
867 if (dma_mapping_error(&pdev->dev, mapping)) {
872 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
873 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
875 __set_bit(sw_prod, rxr->rx_agg_bmap);
876 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
877 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
879 rx_agg_buf->page = page;
880 rx_agg_buf->offset = offset;
881 rx_agg_buf->mapping = mapping;
882 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
883 rxbd->rx_bd_opaque = sw_prod;
887 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
888 struct bnxt_cp_ring_info *cpr,
889 u16 cp_cons, u16 curr)
891 struct rx_agg_cmp *agg;
893 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
894 agg = (struct rx_agg_cmp *)
895 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
899 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
900 struct bnxt_rx_ring_info *rxr,
901 u16 agg_id, u16 curr)
903 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
905 return &tpa_info->agg_arr[curr];
908 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
909 u16 start, u32 agg_bufs, bool tpa)
911 struct bnxt_napi *bnapi = cpr->bnapi;
912 struct bnxt *bp = bnapi->bp;
913 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
914 u16 prod = rxr->rx_agg_prod;
915 u16 sw_prod = rxr->rx_sw_agg_prod;
919 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
922 for (i = 0; i < agg_bufs; i++) {
924 struct rx_agg_cmp *agg;
925 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
926 struct rx_bd *prod_bd;
930 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
932 agg = bnxt_get_agg(bp, cpr, idx, start + i);
933 cons = agg->rx_agg_cmp_opaque;
934 __clear_bit(cons, rxr->rx_agg_bmap);
936 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
937 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
939 __set_bit(sw_prod, rxr->rx_agg_bmap);
940 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
941 cons_rx_buf = &rxr->rx_agg_ring[cons];
943 /* It is possible for sw_prod to be equal to cons, so
944 * set cons_rx_buf->page to NULL first.
946 page = cons_rx_buf->page;
947 cons_rx_buf->page = NULL;
948 prod_rx_buf->page = page;
949 prod_rx_buf->offset = cons_rx_buf->offset;
951 prod_rx_buf->mapping = cons_rx_buf->mapping;
953 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
955 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
956 prod_bd->rx_bd_opaque = sw_prod;
958 prod = NEXT_RX_AGG(prod);
959 sw_prod = NEXT_RX_AGG(sw_prod);
961 rxr->rx_agg_prod = prod;
962 rxr->rx_sw_agg_prod = sw_prod;
965 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
966 struct bnxt_rx_ring_info *rxr,
967 u16 cons, void *data, u8 *data_ptr,
969 unsigned int offset_and_len)
971 unsigned int payload = offset_and_len >> 16;
972 unsigned int len = offset_and_len & 0xffff;
974 struct page *page = data;
975 u16 prod = rxr->rx_prod;
979 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
981 bnxt_reuse_rx_data(rxr, cons, data);
984 dma_addr -= bp->rx_dma_offset;
985 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
986 DMA_ATTR_WEAK_ORDERING);
987 page_pool_release_page(rxr->page_pool, page);
989 if (unlikely(!payload))
990 payload = eth_get_headlen(bp->dev, data_ptr, len);
992 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
998 off = (void *)data_ptr - page_address(page);
999 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1000 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1001 payload + NET_IP_ALIGN);
1003 frag = &skb_shinfo(skb)->frags[0];
1004 skb_frag_size_sub(frag, payload);
1005 skb_frag_off_add(frag, payload);
1006 skb->data_len -= payload;
1007 skb->tail += payload;
1012 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1013 struct bnxt_rx_ring_info *rxr, u16 cons,
1014 void *data, u8 *data_ptr,
1015 dma_addr_t dma_addr,
1016 unsigned int offset_and_len)
1018 u16 prod = rxr->rx_prod;
1019 struct sk_buff *skb;
1022 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1023 if (unlikely(err)) {
1024 bnxt_reuse_rx_data(rxr, cons, data);
1028 skb = build_skb(data, bp->rx_buf_size);
1029 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1030 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1032 skb_free_frag(data);
1036 skb_reserve(skb, bp->rx_offset);
1037 skb_put(skb, offset_and_len & 0xffff);
1041 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1042 struct bnxt_cp_ring_info *cpr,
1043 struct sk_buff *skb, u16 idx,
1044 u32 agg_bufs, bool tpa)
1046 struct bnxt_napi *bnapi = cpr->bnapi;
1047 struct pci_dev *pdev = bp->pdev;
1048 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1049 u16 prod = rxr->rx_agg_prod;
1050 bool p5_tpa = false;
1053 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1056 for (i = 0; i < agg_bufs; i++) {
1058 struct rx_agg_cmp *agg;
1059 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1064 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1066 agg = bnxt_get_agg(bp, cpr, idx, i);
1067 cons = agg->rx_agg_cmp_opaque;
1068 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1069 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1071 cons_rx_buf = &rxr->rx_agg_ring[cons];
1072 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1073 cons_rx_buf->offset, frag_len);
1074 __clear_bit(cons, rxr->rx_agg_bmap);
1076 /* It is possible for bnxt_alloc_rx_page() to allocate
1077 * a sw_prod index that equals the cons index, so we
1078 * need to clear the cons entry now.
1080 mapping = cons_rx_buf->mapping;
1081 page = cons_rx_buf->page;
1082 cons_rx_buf->page = NULL;
1084 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1085 struct skb_shared_info *shinfo;
1086 unsigned int nr_frags;
1088 shinfo = skb_shinfo(skb);
1089 nr_frags = --shinfo->nr_frags;
1090 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1094 cons_rx_buf->page = page;
1096 /* Update prod since some pages may have been
1097 * allocated already.
1099 rxr->rx_agg_prod = prod;
1100 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1104 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1106 DMA_ATTR_WEAK_ORDERING);
1108 skb->data_len += frag_len;
1109 skb->len += frag_len;
1110 skb->truesize += PAGE_SIZE;
1112 prod = NEXT_RX_AGG(prod);
1114 rxr->rx_agg_prod = prod;
1118 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1119 u8 agg_bufs, u32 *raw_cons)
1122 struct rx_agg_cmp *agg;
1124 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1125 last = RING_CMP(*raw_cons);
1126 agg = (struct rx_agg_cmp *)
1127 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1128 return RX_AGG_CMP_VALID(agg, *raw_cons);
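/* Copy a small received packet (up to rx_copy_thresh bytes) into a freshly
 * allocated skb so that the original RX buffer can be reused in place.
 */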
1131 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1135 struct bnxt *bp = bnapi->bp;
1136 struct pci_dev *pdev = bp->pdev;
1137 struct sk_buff *skb;
1139 skb = napi_alloc_skb(&bnapi->napi, len);
1143 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1146 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1147 len + NET_IP_ALIGN);
1149 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1156 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1157 u32 *raw_cons, void *cmp)
1159 struct rx_cmp *rxcmp = cmp;
1160 u32 tmp_raw_cons = *raw_cons;
1161 u8 cmp_type, agg_bufs = 0;
1163 cmp_type = RX_CMP_TYPE(rxcmp);
1165 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1166 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1168 RX_CMP_AGG_BUFS_SHIFT;
1169 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1170 struct rx_tpa_end_cmp *tpa_end = cmp;
1172 if (bp->flags & BNXT_FLAG_CHIP_P5)
1175 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1179 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1182 *raw_cons = tmp_raw_cons;
1186 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1188 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1192 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1194 schedule_delayed_work(&bp->fw_reset_task, delay);
1197 static void bnxt_queue_sp_work(struct bnxt *bp)
1200 queue_work(bnxt_pf_wq, &bp->sp_task);
1202 schedule_work(&bp->sp_task);
1205 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1207 if (!rxr->bnapi->in_reset) {
1208 rxr->bnapi->in_reset = true;
1209 if (bp->flags & BNXT_FLAG_CHIP_P5)
1210 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1212 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1213 bnxt_queue_sp_work(bp);
1215 rxr->rx_next_cons = 0xffff;
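/* P5 TPA bookkeeping: map a hardware agg_id to a free software index tracked
 * in agg_idx_bmap; the reverse lookup is kept in agg_id_tbl.
 */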
1218 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1220 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1221 u16 idx = agg_id & MAX_TPA_P5_MASK;
1223 if (test_bit(idx, map->agg_idx_bmap))
1224 idx = find_first_zero_bit(map->agg_idx_bmap,
1225 BNXT_AGG_IDX_BMAP_SIZE);
1226 __set_bit(idx, map->agg_idx_bmap);
1227 map->agg_id_tbl[agg_id] = idx;
1231 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1233 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1235 __clear_bit(idx, map->agg_idx_bmap);
1238 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1240 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1242 return map->agg_id_tbl[agg_id];
1245 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1246 struct rx_tpa_start_cmp *tpa_start,
1247 struct rx_tpa_start_cmp_ext *tpa_start1)
1249 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1250 struct bnxt_tpa_info *tpa_info;
1251 u16 cons, prod, agg_id;
1252 struct rx_bd *prod_bd;
1255 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1256 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1257 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1259 agg_id = TPA_START_AGG_ID(tpa_start);
1261 cons = tpa_start->rx_tpa_start_cmp_opaque;
1262 prod = rxr->rx_prod;
1263 cons_rx_buf = &rxr->rx_buf_ring[cons];
1264 prod_rx_buf = &rxr->rx_buf_ring[prod];
1265 tpa_info = &rxr->rx_tpa[agg_id];
1267 if (unlikely(cons != rxr->rx_next_cons ||
1268 TPA_START_ERROR(tpa_start))) {
1269 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1270 cons, rxr->rx_next_cons,
1271 TPA_START_ERROR_CODE(tpa_start1));
1272 bnxt_sched_reset(bp, rxr);
1275 /* Store cfa_code in tpa_info to use in tpa_end
1276 * completion processing.
1278 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1279 prod_rx_buf->data = tpa_info->data;
1280 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1282 mapping = tpa_info->mapping;
1283 prod_rx_buf->mapping = mapping;
1285 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1287 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1289 tpa_info->data = cons_rx_buf->data;
1290 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1291 cons_rx_buf->data = NULL;
1292 tpa_info->mapping = cons_rx_buf->mapping;
1295 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1296 RX_TPA_START_CMP_LEN_SHIFT;
1297 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1298 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1300 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1301 tpa_info->gso_type = SKB_GSO_TCPV4;
1302 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1303 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1304 tpa_info->gso_type = SKB_GSO_TCPV6;
1305 tpa_info->rss_hash =
1306 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1308 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1309 tpa_info->gso_type = 0;
1310 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1312 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1313 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1314 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1315 tpa_info->agg_count = 0;
1317 rxr->rx_prod = NEXT_RX(prod);
1318 cons = NEXT_RX(cons);
1319 rxr->rx_next_cons = NEXT_RX(cons);
1320 cons_rx_buf = &rxr->rx_buf_ring[cons];
1322 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1323 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1324 cons_rx_buf->data = NULL;
1327 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1330 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1334 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1336 struct udphdr *uh = NULL;
1338 if (ip_proto == htons(ETH_P_IP)) {
1339 struct iphdr *iph = (struct iphdr *)skb->data;
1341 if (iph->protocol == IPPROTO_UDP)
1342 uh = (struct udphdr *)(iph + 1);
1344 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1346 if (iph->nexthdr == IPPROTO_UDP)
1347 uh = (struct udphdr *)(iph + 1);
1351 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1353 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1358 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1359 int payload_off, int tcp_ts,
1360 struct sk_buff *skb)
1365 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1366 u32 hdr_info = tpa_info->hdr_info;
1367 bool loopback = false;
1369 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1370 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1371 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1373 /* If the packet is an internal loopback packet, the offsets will
1374 * have an extra 4 bytes.
1376 if (inner_mac_off == 4) {
1378 } else if (inner_mac_off > 4) {
1379 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1382 /* We only support inner IPv4/IPv6. If we don't see the
1383 * correct protocol ID, it must be a loopback packet where
1384 * the offsets are off by 4.
1386 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1390 /* internal loopback packet, subtract 4 from all offsets */
1396 nw_off = inner_ip_off - ETH_HLEN;
1397 skb_set_network_header(skb, nw_off);
1398 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1399 struct ipv6hdr *iph = ipv6_hdr(skb);
1401 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1402 len = skb->len - skb_transport_offset(skb);
1404 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1406 struct iphdr *iph = ip_hdr(skb);
1408 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1409 len = skb->len - skb_transport_offset(skb);
1411 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1414 if (inner_mac_off) { /* tunnel */
1415 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1418 bnxt_gro_tunnel(skb, proto);
1424 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1425 int payload_off, int tcp_ts,
1426 struct sk_buff *skb)
1429 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1430 u32 hdr_info = tpa_info->hdr_info;
1431 int iphdr_len, nw_off;
1433 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1434 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1435 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1437 nw_off = inner_ip_off - ETH_HLEN;
1438 skb_set_network_header(skb, nw_off);
1439 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1440 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1441 skb_set_transport_header(skb, nw_off + iphdr_len);
1443 if (inner_mac_off) { /* tunnel */
1444 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1447 bnxt_gro_tunnel(skb, proto);
1453 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1454 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1456 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1457 int payload_off, int tcp_ts,
1458 struct sk_buff *skb)
1462 int len, nw_off, tcp_opt_len = 0;
1467 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1470 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1472 skb_set_network_header(skb, nw_off);
1474 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1475 len = skb->len - skb_transport_offset(skb);
1477 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1478 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1479 struct ipv6hdr *iph;
1481 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1483 skb_set_network_header(skb, nw_off);
1484 iph = ipv6_hdr(skb);
1485 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1486 len = skb->len - skb_transport_offset(skb);
1488 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1490 dev_kfree_skb_any(skb);
1494 if (nw_off) /* tunnel */
1495 bnxt_gro_tunnel(skb, skb->protocol);
1500 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1501 struct bnxt_tpa_info *tpa_info,
1502 struct rx_tpa_end_cmp *tpa_end,
1503 struct rx_tpa_end_cmp_ext *tpa_end1,
1504 struct sk_buff *skb)
1510 segs = TPA_END_TPA_SEGS(tpa_end);
1514 NAPI_GRO_CB(skb)->count = segs;
1515 skb_shinfo(skb)->gso_size =
1516 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1517 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1518 if (bp->flags & BNXT_FLAG_CHIP_P5)
1519 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1521 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1522 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1524 tcp_gro_complete(skb);
1529 /* Given the cfa_code of a received packet, determine which
1530 * netdev (vf-rep or PF) the packet is destined for.
1532 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1534 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1536 /* if vf-rep dev is NULL, the packet must belong to the PF */
1537 return dev ? dev : bp->dev;
1540 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1541 struct bnxt_cp_ring_info *cpr,
1543 struct rx_tpa_end_cmp *tpa_end,
1544 struct rx_tpa_end_cmp_ext *tpa_end1,
1547 struct bnxt_napi *bnapi = cpr->bnapi;
1548 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1549 u8 *data_ptr, agg_bufs;
1551 struct bnxt_tpa_info *tpa_info;
1553 struct sk_buff *skb;
1554 u16 idx = 0, agg_id;
1558 if (unlikely(bnapi->in_reset)) {
1559 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1562 return ERR_PTR(-EBUSY);
1566 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1567 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1568 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1569 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1570 tpa_info = &rxr->rx_tpa[agg_id];
1571 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1572 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1573 agg_bufs, tpa_info->agg_count);
1574 agg_bufs = tpa_info->agg_count;
1576 tpa_info->agg_count = 0;
1577 *event |= BNXT_AGG_EVENT;
1578 bnxt_free_agg_idx(rxr, agg_id);
1580 gro = !!(bp->flags & BNXT_FLAG_GRO);
1582 agg_id = TPA_END_AGG_ID(tpa_end);
1583 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1584 tpa_info = &rxr->rx_tpa[agg_id];
1585 idx = RING_CMP(*raw_cons);
1587 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1588 return ERR_PTR(-EBUSY);
1590 *event |= BNXT_AGG_EVENT;
1591 idx = NEXT_CMP(idx);
1593 gro = !!TPA_END_GRO(tpa_end);
1595 data = tpa_info->data;
1596 data_ptr = tpa_info->data_ptr;
1598 len = tpa_info->len;
1599 mapping = tpa_info->mapping;
1601 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1602 bnxt_abort_tpa(cpr, idx, agg_bufs);
1603 if (agg_bufs > MAX_SKB_FRAGS)
1604 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1605 agg_bufs, (int)MAX_SKB_FRAGS);
1609 if (len <= bp->rx_copy_thresh) {
1610 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1612 bnxt_abort_tpa(cpr, idx, agg_bufs);
1613 cpr->sw_stats.rx.rx_oom_discards += 1;
1618 dma_addr_t new_mapping;
1620 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1622 bnxt_abort_tpa(cpr, idx, agg_bufs);
1623 cpr->sw_stats.rx.rx_oom_discards += 1;
1627 tpa_info->data = new_data;
1628 tpa_info->data_ptr = new_data + bp->rx_offset;
1629 tpa_info->mapping = new_mapping;
1631 skb = build_skb(data, bp->rx_buf_size);
1632 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1633 bp->rx_buf_use_size, bp->rx_dir,
1634 DMA_ATTR_WEAK_ORDERING);
1637 skb_free_frag(data);
1638 bnxt_abort_tpa(cpr, idx, agg_bufs);
1639 cpr->sw_stats.rx.rx_oom_discards += 1;
1642 skb_reserve(skb, bp->rx_offset);
1647 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1649 /* Page reuse already handled by bnxt_rx_pages(). */
1650 cpr->sw_stats.rx.rx_oom_discards += 1;
1656 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1658 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1659 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1661 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1662 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1663 __be16 vlan_proto = htons(tpa_info->metadata >>
1664 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1665 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1667 if (eth_type_vlan(vlan_proto)) {
1668 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1675 skb_checksum_none_assert(skb);
1676 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1677 skb->ip_summed = CHECKSUM_UNNECESSARY;
1679 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1683 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1688 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1689 struct rx_agg_cmp *rx_agg)
1691 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1692 struct bnxt_tpa_info *tpa_info;
1694 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1695 tpa_info = &rxr->rx_tpa[agg_id];
1696 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1697 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1700 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1701 struct sk_buff *skb)
1703 if (skb->dev != bp->dev) {
1704 /* this packet belongs to a vf-rep */
1705 bnxt_vf_rep_rx(bp, skb);
1708 skb_record_rx_queue(skb, bnapi->index);
1709 napi_gro_receive(&bnapi->napi, skb);
1712 /* returns the following:
1713 * 1 - 1 packet successfully received
1714 * 0 - successful TPA_START, packet not completed yet
1715 * -EBUSY - completion ring does not have all the agg buffers yet
1716 * -ENOMEM - packet aborted due to out of memory
1717 * -EIO - packet aborted due to hw error indicated in BD
1719 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1720 u32 *raw_cons, u8 *event)
1722 struct bnxt_napi *bnapi = cpr->bnapi;
1723 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1724 struct net_device *dev = bp->dev;
1725 struct rx_cmp *rxcmp;
1726 struct rx_cmp_ext *rxcmp1;
1727 u32 tmp_raw_cons = *raw_cons;
1728 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1729 struct bnxt_sw_rx_bd *rx_buf;
1731 u8 *data_ptr, agg_bufs, cmp_type;
1732 dma_addr_t dma_addr;
1733 struct sk_buff *skb;
1738 rxcmp = (struct rx_cmp *)
1739 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1741 cmp_type = RX_CMP_TYPE(rxcmp);
1743 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1744 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1745 goto next_rx_no_prod_no_len;
1748 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1749 cp_cons = RING_CMP(tmp_raw_cons);
1750 rxcmp1 = (struct rx_cmp_ext *)
1751 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1753 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1756 /* The validity check of the entry must be done before
1757 * reading any further.
1760 prod = rxr->rx_prod;
1762 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1763 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1764 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1766 *event |= BNXT_RX_EVENT;
1767 goto next_rx_no_prod_no_len;
1769 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1770 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1771 (struct rx_tpa_end_cmp *)rxcmp,
1772 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1779 bnxt_deliver_skb(bp, bnapi, skb);
1782 *event |= BNXT_RX_EVENT;
1783 goto next_rx_no_prod_no_len;
1786 cons = rxcmp->rx_cmp_opaque;
1787 if (unlikely(cons != rxr->rx_next_cons)) {
1788 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1790 /* 0xffff is a forced error, don't print it */
1791 if (rxr->rx_next_cons != 0xffff)
1792 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1793 cons, rxr->rx_next_cons);
1794 bnxt_sched_reset(bp, rxr);
1797 goto next_rx_no_prod_no_len;
1799 rx_buf = &rxr->rx_buf_ring[cons];
1800 data = rx_buf->data;
1801 data_ptr = rx_buf->data_ptr;
1804 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1805 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1808 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1811 cp_cons = NEXT_CMP(cp_cons);
1812 *event |= BNXT_AGG_EVENT;
1814 *event |= BNXT_RX_EVENT;
1816 rx_buf->data = NULL;
1817 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1818 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1820 bnxt_reuse_rx_data(rxr, cons, data);
1822 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1826 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1827 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1828 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1829 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1830 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1832 bnxt_sched_reset(bp, rxr);
1835 goto next_rx_no_len;
1838 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1839 len = flags >> RX_CMP_LEN_SHIFT;
1840 dma_addr = rx_buf->mapping;
1842 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1847 if (len <= bp->rx_copy_thresh) {
1848 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1849 bnxt_reuse_rx_data(rxr, cons, data);
1852 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1854 cpr->sw_stats.rx.rx_oom_discards += 1;
1861 if (rx_buf->data_ptr == data_ptr)
1862 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1865 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1868 cpr->sw_stats.rx.rx_oom_discards += 1;
1875 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1877 cpr->sw_stats.rx.rx_oom_discards += 1;
1883 if (RX_CMP_HASH_VALID(rxcmp)) {
1884 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1885 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1887 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1888 if (hash_type != 1 && hash_type != 3)
1889 type = PKT_HASH_TYPE_L3;
1890 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1893 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1894 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1896 if ((rxcmp1->rx_cmp_flags2 &
1897 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1898 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1899 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1900 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1901 __be16 vlan_proto = htons(meta_data >>
1902 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1904 if (eth_type_vlan(vlan_proto)) {
1905 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1912 skb_checksum_none_assert(skb);
1913 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1914 if (dev->features & NETIF_F_RXCSUM) {
1915 skb->ip_summed = CHECKSUM_UNNECESSARY;
1916 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1919 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1920 if (dev->features & NETIF_F_RXCSUM)
1921 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1925 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1926 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1927 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1928 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1931 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1932 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1934 spin_lock_bh(&ptp->ptp_lock);
1935 ns = timecounter_cyc2time(&ptp->tc, ts);
1936 spin_unlock_bh(&ptp->ptp_lock);
1937 memset(skb_hwtstamps(skb), 0,
1938 sizeof(*skb_hwtstamps(skb)));
1939 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1943 bnxt_deliver_skb(bp, bnapi, skb);
1947 cpr->rx_packets += 1;
1948 cpr->rx_bytes += len;
1951 rxr->rx_prod = NEXT_RX(prod);
1952 rxr->rx_next_cons = NEXT_RX(cons);
1954 next_rx_no_prod_no_len:
1955 *raw_cons = tmp_raw_cons;
1960 /* In netpoll mode, if we are using a combined completion ring, we need to
1961 * discard the rx packets and recycle the buffers.
1963 static int bnxt_force_rx_discard(struct bnxt *bp,
1964 struct bnxt_cp_ring_info *cpr,
1965 u32 *raw_cons, u8 *event)
1967 u32 tmp_raw_cons = *raw_cons;
1968 struct rx_cmp_ext *rxcmp1;
1969 struct rx_cmp *rxcmp;
1974 cp_cons = RING_CMP(tmp_raw_cons);
1975 rxcmp = (struct rx_cmp *)
1976 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1978 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1979 cp_cons = RING_CMP(tmp_raw_cons);
1980 rxcmp1 = (struct rx_cmp_ext *)
1981 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1983 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1986 /* The validity check of the entry must be done before
1987 * reading any further.
1990 cmp_type = RX_CMP_TYPE(rxcmp);
1991 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1992 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1993 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1994 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1995 struct rx_tpa_end_cmp_ext *tpa_end1;
1997 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1998 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1999 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2001 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2002 if (rc && rc != -EBUSY)
2003 cpr->sw_stats.rx.rx_netpoll_discards += 1;
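/* Read a firmware health register. The register descriptor encodes both the
 * access method (PCI config space, GRC window, BAR0 or BAR1) and the offset.
 */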
2007 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2009 struct bnxt_fw_health *fw_health = bp->fw_health;
2010 u32 reg = fw_health->regs[reg_idx];
2011 u32 reg_type, reg_off, val = 0;
2013 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2014 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2016 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2017 pci_read_config_dword(bp->pdev, reg_off, &val);
2019 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2020 reg_off = fw_health->mapped_regs[reg_idx];
2022 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2023 val = readl(bp->bar0 + reg_off);
2025 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2026 val = readl(bp->bar1 + reg_off);
2029 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2030 val &= fw_health->fw_reset_inprog_reg_mask;
2034 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2038 for (i = 0; i < bp->rx_nr_rings; i++) {
2039 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2040 struct bnxt_ring_grp_info *grp_info;
2042 grp_info = &bp->grp_info[grp_idx];
2043 if (grp_info->agg_fw_ring_id == ring_id)
2046 return INVALID_HW_RING_ID;
2049 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2051 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2054 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2055 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2056 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2058 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2059 netdev_warn(bp->dev, "Pause Storm detected!\n");
2061 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2062 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2065 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2071 #define BNXT_GET_EVENT_PORT(data) \
2073 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2075 #define BNXT_EVENT_RING_TYPE(data2) \
2077 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2079 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2080 (BNXT_EVENT_RING_TYPE(data2) == \
2081 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2083 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2084 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2085 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2087 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2088 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2089 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2091 #define BNXT_PHC_BITS 48
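/* Process one asynchronous notification from the firmware.  Most events
 * only set a flag in bp->sp_event here; the actual handling is deferred
 * to the slow-path workqueue via bnxt_queue_sp_work().  Events that are
 * fully handled inline jump to async_event_process_exit so the work
 * queue is not kicked unnecessarily.
 */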
2093 static int bnxt_async_event_process(struct bnxt *bp,
2094 struct hwrm_async_event_cmpl *cmpl)
2096 u16 event_id = le16_to_cpu(cmpl->event_id);
2097 u32 data1 = le32_to_cpu(cmpl->event_data1);
2098 u32 data2 = le32_to_cpu(cmpl->event_data2);
2100 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2101 event_id, data1, data2);
2103 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2105 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2106 struct bnxt_link_info *link_info = &bp->link_info;
2109 goto async_event_process_exit;
2111 /* print unsupported speed warning in forced speed mode only */
2112 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2113 (data1 & 0x20000)) {
2114 u16 fw_speed = link_info->force_link_speed;
2115 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2117 if (speed != SPEED_UNKNOWN)
2118 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2121 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2124 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2125 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2126 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2128 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2129 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2131 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2132 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2134 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2135 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2140 if (bp->pf.port_id != port_id)
2143 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2146 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2148 goto async_event_process_exit;
2149 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2151 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2152 char *type_str = "Solicited";
2155 goto async_event_process_exit;
2157 bp->fw_reset_timestamp = jiffies;
2158 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2159 if (!bp->fw_reset_min_dsecs)
2160 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2161 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2162 if (!bp->fw_reset_max_dsecs)
2163 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2164 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2165 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2166 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2168 bp->fw_health->fatalities++;
2169 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2170 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2171 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2172 type_str = "Non-fatal";
2173 bp->fw_health->survivals++;
2174 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2176 netif_warn(bp, hw, bp->dev,
2177 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2178 type_str, data1, data2,
2179 bp->fw_reset_min_dsecs * 100,
2180 bp->fw_reset_max_dsecs * 100);
2181 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2184 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2185 struct bnxt_fw_health *fw_health = bp->fw_health;
2186 char *status_desc = "healthy";
2190 goto async_event_process_exit;
2192 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2193 fw_health->enabled = false;
2194 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2197 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2198 fw_health->tmr_multiplier =
2199 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2200 bp->current_interval * 10);
2201 fw_health->tmr_counter = fw_health->tmr_multiplier;
2202 if (!fw_health->enabled)
2203 fw_health->last_fw_heartbeat =
2204 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2205 fw_health->last_fw_reset_cnt =
2206 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2207 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2208 if (status != BNXT_FW_STATUS_HEALTHY)
2209 status_desc = "unhealthy";
2210 netif_info(bp, drv, bp->dev,
2211 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2212 fw_health->primary ? "primary" : "backup", status,
2213 status_desc, fw_health->last_fw_reset_cnt);
2214 if (!fw_health->enabled) {
2215 /* Make sure tmr_counter is set and visible to
2216 * bnxt_health_check() before setting enabled to true.
 */
smp_wmb();
2219 fw_health->enabled = true;
2221 goto async_event_process_exit;
2223 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2224 netif_notice(bp, hw, bp->dev,
2225 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2227 goto async_event_process_exit;
2228 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2229 struct bnxt_rx_ring_info *rxr;
2232 if (bp->flags & BNXT_FLAG_CHIP_P5)
2233 goto async_event_process_exit;
2235 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2236 BNXT_EVENT_RING_TYPE(data2), data1);
2237 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2238 goto async_event_process_exit;
2240 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2241 if (grp_idx == INVALID_HW_RING_ID) {
2242 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2244 goto async_event_process_exit;
2246 rxr = bp->bnapi[grp_idx]->rx_ring;
2247 bnxt_sched_reset(bp, rxr);
2248 goto async_event_process_exit;
2250 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2251 struct bnxt_fw_health *fw_health = bp->fw_health;
2253 netif_notice(bp, hw, bp->dev,
2254 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2257 fw_health->echo_req_data1 = data1;
2258 fw_health->echo_req_data2 = data2;
2259 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2262 goto async_event_process_exit;
2264 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2265 bnxt_ptp_pps_event(bp, data1, data2);
2266 goto async_event_process_exit;
2268 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2269 bnxt_event_error_report(bp, data1, data2);
2270 goto async_event_process_exit;
2272 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2273 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2274 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2275 if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
2276 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2279 spin_lock_bh(&ptp->ptp_lock);
2280 bnxt_ptp_update_current_time(bp);
2281 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2282 BNXT_PHC_BITS) | ptp->current_time);
2283 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2284 spin_unlock_bh(&ptp->ptp_lock);
2288 goto async_event_process_exit;
2290 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2291 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2293 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2294 goto async_event_process_exit;
2297 goto async_event_process_exit;
2299 bnxt_queue_sp_work(bp);
2300 async_event_process_exit:
2301 bnxt_ulp_async_events(bp, cmpl);
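/* Handle HWRM-related completions found on a completion ring:
 * HWRM_DONE completes the pending request that owns the sequence ID,
 * HWRM_FWD_REQ queues a forwarded VF request for the PF slow path, and
 * HWRM_ASYNC_EVENT is dispatched to bnxt_async_event_process().
 */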
2305 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2307 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2308 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2309 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2310 (struct hwrm_fwd_req_cmpl *)txcmp;
2312 switch (cmpl_type) {
2313 case CMPL_BASE_TYPE_HWRM_DONE:
2314 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2315 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2318 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2319 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2321 if ((vf_id < bp->pf.first_vf_id) ||
2322 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2323 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2328 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2329 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2330 bnxt_queue_sp_work(bp);
2333 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2334 bnxt_async_event_process(bp,
2335 (struct hwrm_async_event_cmpl *)txcmp);
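/* MSI-X interrupt handler: one vector per completion ring.  Prefetch
 * the next completion descriptor and hand the rest of the work to NAPI.
 */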
2345 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2347 struct bnxt_napi *bnapi = dev_instance;
2348 struct bnxt *bp = bnapi->bp;
2349 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2350 u32 cons = RING_CMP(cpr->cp_raw_cons);
2353 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2354 napi_schedule(&bnapi->napi);
2358 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2360 u32 raw_cons = cpr->cp_raw_cons;
2361 u16 cons = RING_CMP(raw_cons);
2362 struct tx_cmp *txcmp;
2364 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2366 return TX_CMP_VALID(txcmp, raw_cons);
2369 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2371 struct bnxt_napi *bnapi = dev_instance;
2372 struct bnxt *bp = bnapi->bp;
2373 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2374 u32 cons = RING_CMP(cpr->cp_raw_cons);
2377 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2379 if (!bnxt_has_work(bp, cpr)) {
2380 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2381 /* return if erroneous interrupt */
2382 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2386 /* disable ring IRQ */
2387 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2389 /* Return here if interrupt is shared and is disabled. */
2390 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2393 napi_schedule(&bnapi->napi);
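/* Core NAPI worker: walk the completion ring and dispatch TX, RX and
 * HWRM completions until the NAPI budget is exhausted or the ring is
 * empty.  TX reclaim and the RX/agg producer doorbells are not done
 * here; the counts and event flags are accumulated in bnapi and
 * flushed later by __bnxt_poll_work_done().
 */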
2397 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2400 struct bnxt_napi *bnapi = cpr->bnapi;
2401 u32 raw_cons = cpr->cp_raw_cons;
2406 struct tx_cmp *txcmp;
2408 cpr->has_more_work = 0;
2409 cpr->had_work_done = 1;
2413 cons = RING_CMP(raw_cons);
2414 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2416 if (!TX_CMP_VALID(txcmp, raw_cons))
2419 /* The valid test of the entry must be done first before
2420 * reading any further.
 */
dma_rmb();
2423 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2425 /* return full budget so NAPI will complete. */
2426 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2428 raw_cons = NEXT_RAW_CMP(raw_cons);
2430 cpr->has_more_work = 1;
2433 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2435 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2437 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2439 if (likely(rc >= 0))
2441 /* Increment rx_pkts when rc is -ENOMEM to count towards
2442 * the NAPI budget. Otherwise, we may potentially loop
2443 * here forever if we consistently cannot allocate
 * rx buffers.
 */
2446 else if (rc == -ENOMEM && budget)
2448 else if (rc == -EBUSY) /* partial completion */
2450 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2451 CMPL_BASE_TYPE_HWRM_DONE) ||
2452 (TX_CMP_TYPE(txcmp) ==
2453 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2454 (TX_CMP_TYPE(txcmp) ==
2455 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2456 bnxt_hwrm_handler(bp, txcmp);
2458 raw_cons = NEXT_RAW_CMP(raw_cons);
2460 if (rx_pkts && rx_pkts == budget) {
2461 cpr->has_more_work = 1;
2466 if (event & BNXT_REDIRECT_EVENT)
2469 if (event & BNXT_TX_EVENT) {
2470 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2471 u16 prod = txr->tx_prod;
2473 /* Sync BD data before updating doorbell */
2476 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2479 cpr->cp_raw_cons = raw_cons;
2480 bnapi->tx_pkts += tx_pkts;
2481 bnapi->events |= event;
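/* Flush the work recorded by __bnxt_poll_work(): reclaim completed TX
 * buffers and ring the RX and aggregation producer doorbells.  RX
 * doorbells are skipped while the ring is being reset (bnapi->in_reset).
 */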
2485 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2487 if (bnapi->tx_pkts) {
2488 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2492 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2493 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2495 if (bnapi->events & BNXT_AGG_EVENT)
2496 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2497 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2502 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2505 struct bnxt_napi *bnapi = cpr->bnapi;
2508 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2510 /* ACK completion ring before freeing tx ring and producing new
2511 * buffers in rx/agg rings to prevent overflowing the completion
 * ring.
 */
2514 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2516 __bnxt_poll_work_done(bp, bnapi);
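/* NAPI poll handler for the special completion ring on Nitro A0
 * (BCM58700).  RX completions on this ring are not delivered to the
 * stack; each buffer is flagged with a CRC error so that bnxt_rx_pkt()
 * simply recycles it.
 */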
2520 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2522 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2523 struct bnxt *bp = bnapi->bp;
2524 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2525 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2526 struct tx_cmp *txcmp;
2527 struct rx_cmp_ext *rxcmp1;
2528 u32 cp_cons, tmp_raw_cons;
2529 u32 raw_cons = cpr->cp_raw_cons;
2536 cp_cons = RING_CMP(raw_cons);
2537 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2539 if (!TX_CMP_VALID(txcmp, raw_cons))
2542 /* The valid test of the entry must be done first before
2543 * reading any further.
 */
dma_rmb();
2546 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2547 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2548 cp_cons = RING_CMP(tmp_raw_cons);
2549 rxcmp1 = (struct rx_cmp_ext *)
2550 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2552 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2555 /* force an error to recycle the buffer */
2556 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2557 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2559 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2560 if (likely(rc == -EIO) && budget)
2562 else if (rc == -EBUSY) /* partial completion */
2564 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2565 CMPL_BASE_TYPE_HWRM_DONE)) {
2566 bnxt_hwrm_handler(bp, txcmp);
2569 "Invalid completion received on special ring\n");
2571 raw_cons = NEXT_RAW_CMP(raw_cons);
2573 if (rx_pkts == budget)
2577 cpr->cp_raw_cons = raw_cons;
2578 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2579 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2581 if (event & BNXT_AGG_EVENT)
2582 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2584 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2585 napi_complete_done(napi, rx_pkts);
2586 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2591 static int bnxt_poll(struct napi_struct *napi, int budget)
2593 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2594 struct bnxt *bp = bnapi->bp;
2595 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2598 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2599 napi_complete(napi);
2603 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2605 if (work_done >= budget) {
2607 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2611 if (!bnxt_has_work(bp, cpr)) {
2612 if (napi_complete_done(napi, work_done))
2613 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2617 if (bp->flags & BNXT_FLAG_DIM) {
2618 struct dim_sample dim_sample = {};
2620 dim_update_sample(cpr->event_ctr,
2624 net_dim(&cpr->dim, dim_sample);
2629 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2631 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2632 int i, work_done = 0;
2634 for (i = 0; i < 2; i++) {
2635 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2638 work_done += __bnxt_poll_work(bp, cpr2,
2639 budget - work_done);
2640 cpr->has_more_work |= cpr2->has_more_work;
2646 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2649 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2652 for (i = 0; i < 2; i++) {
2653 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2654 struct bnxt_db_info *db;
2656 if (cpr2 && cpr2->had_work_done) {
2658 bnxt_writeq(bp, db->db_key64 | dbr_type |
2659 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2660 cpr2->had_work_done = 0;
2663 __bnxt_poll_work_done(bp, bnapi);
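/* NAPI poll handler for P5 chips.  The vector services a notification
 * queue (NQ); each CQ_NOTIFICATION entry names the completion ring
 * (cp_ring_arr[] index) that has work, and that ring is then drained
 * with __bnxt_poll_work().  The CQs are acknowledged and re-armed
 * through __bnxt_poll_cqs_done(), and the NQ doorbell is updated last.
 */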
2666 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2668 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2669 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2670 struct bnxt_cp_ring_info *cpr_rx;
2671 u32 raw_cons = cpr->cp_raw_cons;
2672 struct bnxt *bp = bnapi->bp;
2673 struct nqe_cn *nqcmp;
2677 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2678 napi_complete(napi);
2681 if (cpr->has_more_work) {
2682 cpr->has_more_work = 0;
2683 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2686 cons = RING_CMP(raw_cons);
2687 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2689 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2690 if (cpr->has_more_work)
2693 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2694 cpr->cp_raw_cons = raw_cons;
2695 if (napi_complete_done(napi, work_done))
2696 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2701 /* The valid test of the entry must be done first before
2702 * reading any further.
 */
dma_rmb();
2706 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2707 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2708 struct bnxt_cp_ring_info *cpr2;
2710 cpr2 = cpr->cp_ring_arr[idx];
2711 work_done += __bnxt_poll_work(bp, cpr2,
2712 budget - work_done);
2713 cpr->has_more_work |= cpr2->has_more_work;
2715 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2717 raw_cons = NEXT_RAW_CMP(raw_cons);
2719 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2720 if (raw_cons != cpr->cp_raw_cons) {
2721 cpr->cp_raw_cons = raw_cons;
2722 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2725 cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2726 if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2727 struct dim_sample dim_sample = {};
2729 dim_update_sample(cpr->event_ctr,
2733 net_dim(&cpr->dim, dim_sample);
2738 static void bnxt_free_tx_skbs(struct bnxt *bp)
2741 struct pci_dev *pdev = bp->pdev;
2746 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2747 for (i = 0; i < bp->tx_nr_rings; i++) {
2748 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2751 if (!txr->tx_buf_ring)
2754 for (j = 0; j < max_idx;) {
2755 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2756 struct sk_buff *skb;
2759 if (i < bp->tx_nr_rings_xdp &&
2760 tx_buf->action == XDP_REDIRECT) {
2761 dma_unmap_single(&pdev->dev,
2762 dma_unmap_addr(tx_buf, mapping),
2763 dma_unmap_len(tx_buf, len),
2765 xdp_return_frame(tx_buf->xdpf);
2767 tx_buf->xdpf = NULL;
2780 if (tx_buf->is_push) {
2786 dma_unmap_single(&pdev->dev,
2787 dma_unmap_addr(tx_buf, mapping),
2791 last = tx_buf->nr_frags;
2793 for (k = 0; k < last; k++, j++) {
2794 int ring_idx = j & bp->tx_ring_mask;
2795 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2797 tx_buf = &txr->tx_buf_ring[ring_idx];
2800 dma_unmap_addr(tx_buf, mapping),
2801 skb_frag_size(frag), DMA_TO_DEVICE);
2805 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2809 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2811 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2812 struct pci_dev *pdev = bp->pdev;
2813 struct bnxt_tpa_idx_map *map;
2814 int i, max_idx, max_agg_idx;
2816 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2817 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2819 goto skip_rx_tpa_free;
2821 for (i = 0; i < bp->max_tpa; i++) {
2822 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2823 u8 *data = tpa_info->data;
2828 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2829 bp->rx_buf_use_size, bp->rx_dir,
2830 DMA_ATTR_WEAK_ORDERING);
2832 tpa_info->data = NULL;
2834 skb_free_frag(data);
2838 if (!rxr->rx_buf_ring)
2839 goto skip_rx_buf_free;
2841 for (i = 0; i < max_idx; i++) {
2842 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2843 dma_addr_t mapping = rx_buf->mapping;
2844 void *data = rx_buf->data;
2849 rx_buf->data = NULL;
2850 if (BNXT_RX_PAGE_MODE(bp)) {
2851 mapping -= bp->rx_dma_offset;
2852 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2854 DMA_ATTR_WEAK_ORDERING);
2855 page_pool_recycle_direct(rxr->page_pool, data);
2857 dma_unmap_single_attrs(&pdev->dev, mapping,
2858 bp->rx_buf_use_size, bp->rx_dir,
2859 DMA_ATTR_WEAK_ORDERING);
2860 skb_free_frag(data);
2865 if (!rxr->rx_agg_ring)
2866 goto skip_rx_agg_free;
2868 for (i = 0; i < max_agg_idx; i++) {
2869 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2870 struct page *page = rx_agg_buf->page;
2875 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2876 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2877 DMA_ATTR_WEAK_ORDERING);
2879 rx_agg_buf->page = NULL;
2880 __clear_bit(i, rxr->rx_agg_bmap);
2887 __free_page(rxr->rx_page);
2888 rxr->rx_page = NULL;
2890 map = rxr->rx_tpa_idx_map;
2892 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2895 static void bnxt_free_rx_skbs(struct bnxt *bp)
2902 for (i = 0; i < bp->rx_nr_rings; i++)
2903 bnxt_free_one_rx_ring_skbs(bp, i);
2906 static void bnxt_free_skbs(struct bnxt *bp)
2908 bnxt_free_tx_skbs(bp);
2909 bnxt_free_rx_skbs(bp);
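/* Initialize a block of context memory.  If no offset is specified
 * (BNXT_MEM_INVALID_OFFSET), the whole block is filled with init_val;
 * otherwise init_val is written at that offset within each
 * mem_init->size sized record.
 */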
2912 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2914 u8 init_val = mem_init->init_val;
2915 u16 offset = mem_init->offset;
2921 if (offset == BNXT_MEM_INVALID_OFFSET) {
2922 memset(p, init_val, len);
2925 for (i = 0; i < len; i += mem_init->size)
2926 *(p2 + i + offset) = init_val;
2929 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2931 struct pci_dev *pdev = bp->pdev;
2937 for (i = 0; i < rmem->nr_pages; i++) {
2938 if (!rmem->pg_arr[i])
2941 dma_free_coherent(&pdev->dev, rmem->page_size,
2942 rmem->pg_arr[i], rmem->dma_arr[i]);
2944 rmem->pg_arr[i] = NULL;
2948 size_t pg_tbl_size = rmem->nr_pages * 8;
2950 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2951 pg_tbl_size = rmem->page_size;
2952 dma_free_coherent(&pdev->dev, pg_tbl_size,
2953 rmem->pg_tbl, rmem->pg_tbl_map);
2954 rmem->pg_tbl = NULL;
2956 if (rmem->vmem_size && *rmem->vmem) {
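/* Allocate the descriptor pages for a ring.  When the ring spans more
 * than one page (or uses an indirection level), a page table is also
 * allocated and each entry is tagged with PTU_PTE_VALID plus
 * NEXT_TO_LAST/LAST markers as required by the chip.
 */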
2962 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2964 struct pci_dev *pdev = bp->pdev;
2968 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2969 valid_bit = PTU_PTE_VALID;
2970 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2971 size_t pg_tbl_size = rmem->nr_pages * 8;
2973 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2974 pg_tbl_size = rmem->page_size;
2975 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2982 for (i = 0; i < rmem->nr_pages; i++) {
2983 u64 extra_bits = valid_bit;
2985 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2989 if (!rmem->pg_arr[i])
2993 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2995 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2996 if (i == rmem->nr_pages - 2 &&
2997 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2998 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2999 else if (i == rmem->nr_pages - 1 &&
3000 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3001 extra_bits |= PTU_PTE_LAST;
3003 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3007 if (rmem->vmem_size) {
3008 *rmem->vmem = vzalloc(rmem->vmem_size);
3015 static void bnxt_free_tpa_info(struct bnxt *bp)
3019 for (i = 0; i < bp->rx_nr_rings; i++) {
3020 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3022 kfree(rxr->rx_tpa_idx_map);
3023 rxr->rx_tpa_idx_map = NULL;
3025 kfree(rxr->rx_tpa[0].agg_arr);
3026 rxr->rx_tpa[0].agg_arr = NULL;
3033 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3035 int i, j, total_aggs = 0;
3037 bp->max_tpa = MAX_TPA;
3038 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3039 if (!bp->max_tpa_v2)
3041 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3042 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3045 for (i = 0; i < bp->rx_nr_rings; i++) {
3046 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3047 struct rx_agg_cmp *agg;
3049 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3054 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3056 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3057 rxr->rx_tpa[0].agg_arr = agg;
3060 for (j = 1; j < bp->max_tpa; j++)
3061 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3062 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3064 if (!rxr->rx_tpa_idx_map)
3070 static void bnxt_free_rx_rings(struct bnxt *bp)
3077 bnxt_free_tpa_info(bp);
3078 for (i = 0; i < bp->rx_nr_rings; i++) {
3079 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3080 struct bnxt_ring_struct *ring;
3083 bpf_prog_put(rxr->xdp_prog);
3085 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3086 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3088 page_pool_destroy(rxr->page_pool);
3089 rxr->page_pool = NULL;
3091 kfree(rxr->rx_agg_bmap);
3092 rxr->rx_agg_bmap = NULL;
3094 ring = &rxr->rx_ring_struct;
3095 bnxt_free_ring(bp, &ring->ring_mem);
3097 ring = &rxr->rx_agg_ring_struct;
3098 bnxt_free_ring(bp, &ring->ring_mem);
3102 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3103 struct bnxt_rx_ring_info *rxr)
3105 struct page_pool_params pp = { 0 };
3107 pp.pool_size = bp->rx_ring_size;
3108 pp.nid = dev_to_node(&bp->pdev->dev);
3109 pp.dev = &bp->pdev->dev;
3110 pp.dma_dir = DMA_BIDIRECTIONAL;
3112 rxr->page_pool = page_pool_create(&pp);
3113 if (IS_ERR(rxr->page_pool)) {
3114 int err = PTR_ERR(rxr->page_pool);
3116 rxr->page_pool = NULL;
3122 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3124 int i, rc = 0, agg_rings = 0;
3129 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3132 for (i = 0; i < bp->rx_nr_rings; i++) {
3133 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3134 struct bnxt_ring_struct *ring;
3136 ring = &rxr->rx_ring_struct;
3138 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3142 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3146 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3150 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3154 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3162 ring = &rxr->rx_agg_ring_struct;
3163 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3168 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3169 mem_size = rxr->rx_agg_bmap_size / 8;
3170 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3171 if (!rxr->rx_agg_bmap)
3175 if (bp->flags & BNXT_FLAG_TPA)
3176 rc = bnxt_alloc_tpa_info(bp);
3180 static void bnxt_free_tx_rings(struct bnxt *bp)
3183 struct pci_dev *pdev = bp->pdev;
3188 for (i = 0; i < bp->tx_nr_rings; i++) {
3189 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3190 struct bnxt_ring_struct *ring;
3193 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3194 txr->tx_push, txr->tx_push_mapping);
3195 txr->tx_push = NULL;
3198 ring = &txr->tx_ring_struct;
3200 bnxt_free_ring(bp, &ring->ring_mem);
3204 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3207 struct pci_dev *pdev = bp->pdev;
3209 bp->tx_push_size = 0;
3210 if (bp->tx_push_thresh) {
3213 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3214 bp->tx_push_thresh);
3216 if (push_size > 256) {
3218 bp->tx_push_thresh = 0;
3221 bp->tx_push_size = push_size;
3224 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3225 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3226 struct bnxt_ring_struct *ring;
3229 ring = &txr->tx_ring_struct;
3231 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3235 ring->grp_idx = txr->bnapi->index;
3236 if (bp->tx_push_size) {
3239 /* One pre-allocated DMA buffer to backup
 * TX push operation
 */
3242 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3244 &txr->tx_push_mapping,
3250 mapping = txr->tx_push_mapping +
3251 sizeof(struct tx_push_bd);
3252 txr->data_mapping = cpu_to_le64(mapping);
3254 qidx = bp->tc_to_qidx[j];
3255 ring->queue_id = bp->q_info[qidx].queue_id;
3256 if (i < bp->tx_nr_rings_xdp)
3258 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3264 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3266 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3268 kfree(cpr->cp_desc_ring);
3269 cpr->cp_desc_ring = NULL;
3270 ring->ring_mem.pg_arr = NULL;
3271 kfree(cpr->cp_desc_mapping);
3272 cpr->cp_desc_mapping = NULL;
3273 ring->ring_mem.dma_arr = NULL;
3276 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3278 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3279 if (!cpr->cp_desc_ring)
3281 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3283 if (!cpr->cp_desc_mapping)
3288 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3294 for (i = 0; i < bp->cp_nr_rings; i++) {
3295 struct bnxt_napi *bnapi = bp->bnapi[i];
3299 bnxt_free_cp_arrays(&bnapi->cp_ring);
3303 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3305 int i, n = bp->cp_nr_pages;
3307 for (i = 0; i < bp->cp_nr_rings; i++) {
3308 struct bnxt_napi *bnapi = bp->bnapi[i];
3313 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3320 static void bnxt_free_cp_rings(struct bnxt *bp)
3327 for (i = 0; i < bp->cp_nr_rings; i++) {
3328 struct bnxt_napi *bnapi = bp->bnapi[i];
3329 struct bnxt_cp_ring_info *cpr;
3330 struct bnxt_ring_struct *ring;
3336 cpr = &bnapi->cp_ring;
3337 ring = &cpr->cp_ring_struct;
3339 bnxt_free_ring(bp, &ring->ring_mem);
3341 for (j = 0; j < 2; j++) {
3342 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3345 ring = &cpr2->cp_ring_struct;
3346 bnxt_free_ring(bp, &ring->ring_mem);
3347 bnxt_free_cp_arrays(cpr2);
3349 cpr->cp_ring_arr[j] = NULL;
3355 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3357 struct bnxt_ring_mem_info *rmem;
3358 struct bnxt_ring_struct *ring;
3359 struct bnxt_cp_ring_info *cpr;
3362 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3366 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3368 bnxt_free_cp_arrays(cpr);
3372 ring = &cpr->cp_ring_struct;
3373 rmem = &ring->ring_mem;
3374 rmem->nr_pages = bp->cp_nr_pages;
3375 rmem->page_size = HW_CMPD_RING_SIZE;
3376 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3377 rmem->dma_arr = cpr->cp_desc_mapping;
3378 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3379 rc = bnxt_alloc_ring(bp, rmem);
3381 bnxt_free_ring(bp, rmem);
3382 bnxt_free_cp_arrays(cpr);
3389 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3391 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3392 int i, rc, ulp_base_vec, ulp_msix;
3394 ulp_msix = bnxt_get_ulp_msix_num(bp);
3395 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3396 for (i = 0; i < bp->cp_nr_rings; i++) {
3397 struct bnxt_napi *bnapi = bp->bnapi[i];
3398 struct bnxt_cp_ring_info *cpr;
3399 struct bnxt_ring_struct *ring;
3404 cpr = &bnapi->cp_ring;
3406 ring = &cpr->cp_ring_struct;
3408 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3412 if (ulp_msix && i >= ulp_base_vec)
3413 ring->map_idx = i + ulp_msix;
3417 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3420 if (i < bp->rx_nr_rings) {
3421 struct bnxt_cp_ring_info *cpr2 =
3422 bnxt_alloc_cp_sub_ring(bp);
3424 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3427 cpr2->bnapi = bnapi;
3429 if ((sh && i < bp->tx_nr_rings) ||
3430 (!sh && i >= bp->rx_nr_rings)) {
3431 struct bnxt_cp_ring_info *cpr2 =
3432 bnxt_alloc_cp_sub_ring(bp);
3434 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3437 cpr2->bnapi = bnapi;
3443 static void bnxt_init_ring_struct(struct bnxt *bp)
3447 for (i = 0; i < bp->cp_nr_rings; i++) {
3448 struct bnxt_napi *bnapi = bp->bnapi[i];
3449 struct bnxt_ring_mem_info *rmem;
3450 struct bnxt_cp_ring_info *cpr;
3451 struct bnxt_rx_ring_info *rxr;
3452 struct bnxt_tx_ring_info *txr;
3453 struct bnxt_ring_struct *ring;
3458 cpr = &bnapi->cp_ring;
3459 ring = &cpr->cp_ring_struct;
3460 rmem = &ring->ring_mem;
3461 rmem->nr_pages = bp->cp_nr_pages;
3462 rmem->page_size = HW_CMPD_RING_SIZE;
3463 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3464 rmem->dma_arr = cpr->cp_desc_mapping;
3465 rmem->vmem_size = 0;
3467 rxr = bnapi->rx_ring;
3471 ring = &rxr->rx_ring_struct;
3472 rmem = &ring->ring_mem;
3473 rmem->nr_pages = bp->rx_nr_pages;
3474 rmem->page_size = HW_RXBD_RING_SIZE;
3475 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3476 rmem->dma_arr = rxr->rx_desc_mapping;
3477 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3478 rmem->vmem = (void **)&rxr->rx_buf_ring;
3480 ring = &rxr->rx_agg_ring_struct;
3481 rmem = &ring->ring_mem;
3482 rmem->nr_pages = bp->rx_agg_nr_pages;
3483 rmem->page_size = HW_RXBD_RING_SIZE;
3484 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3485 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3486 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3487 rmem->vmem = (void **)&rxr->rx_agg_ring;
3490 txr = bnapi->tx_ring;
3494 ring = &txr->tx_ring_struct;
3495 rmem = &ring->ring_mem;
3496 rmem->nr_pages = bp->tx_nr_pages;
3497 rmem->page_size = HW_RXBD_RING_SIZE;
3498 rmem->pg_arr = (void **)txr->tx_desc_ring;
3499 rmem->dma_arr = txr->tx_desc_mapping;
3500 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3501 rmem->vmem = (void **)&txr->tx_buf_ring;
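/* Pre-program the static fields of every RX buffer descriptor in a
 * ring: the length/flags/type word is the same for all BDs, and
 * rx_bd_opaque is set to the descriptor's ring index so completions
 * can be matched back to the software buffer.
 */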
3505 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3509 struct rx_bd **rx_buf_ring;
3511 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3512 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3516 rxbd = rx_buf_ring[i];
3520 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3521 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3522 rxbd->rx_bd_opaque = prod;
3527 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3529 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3530 struct net_device *dev = bp->dev;
3534 prod = rxr->rx_prod;
3535 for (i = 0; i < bp->rx_ring_size; i++) {
3536 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3537 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3538 ring_nr, i, bp->rx_ring_size);
3541 prod = NEXT_RX(prod);
3543 rxr->rx_prod = prod;
3545 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3548 prod = rxr->rx_agg_prod;
3549 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3550 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3551 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3552 ring_nr, i, bp->rx_ring_size);
3555 prod = NEXT_RX_AGG(prod);
3557 rxr->rx_agg_prod = prod;
3563 for (i = 0; i < bp->max_tpa; i++) {
3564 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
3568 rxr->rx_tpa[i].data = data;
3569 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3570 rxr->rx_tpa[i].mapping = mapping;
3576 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3578 struct bnxt_rx_ring_info *rxr;
3579 struct bnxt_ring_struct *ring;
3582 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3583 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3585 if (NET_IP_ALIGN == 2)
3586 type |= RX_BD_FLAGS_SOP;
3588 rxr = &bp->rx_ring[ring_nr];
3589 ring = &rxr->rx_ring_struct;
3590 bnxt_init_rxbd_pages(ring, type);
3592 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3593 bpf_prog_add(bp->xdp_prog, 1);
3594 rxr->xdp_prog = bp->xdp_prog;
3596 ring->fw_ring_id = INVALID_HW_RING_ID;
3598 ring = &rxr->rx_agg_ring_struct;
3599 ring->fw_ring_id = INVALID_HW_RING_ID;
3601 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3602 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3603 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3605 bnxt_init_rxbd_pages(ring, type);
3608 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3611 static void bnxt_init_cp_rings(struct bnxt *bp)
3615 for (i = 0; i < bp->cp_nr_rings; i++) {
3616 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3617 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3619 ring->fw_ring_id = INVALID_HW_RING_ID;
3620 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3621 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3622 for (j = 0; j < 2; j++) {
3623 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3628 ring = &cpr2->cp_ring_struct;
3629 ring->fw_ring_id = INVALID_HW_RING_ID;
3630 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3631 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3636 static int bnxt_init_rx_rings(struct bnxt *bp)
3640 if (BNXT_RX_PAGE_MODE(bp)) {
3641 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3642 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3644 bp->rx_offset = BNXT_RX_OFFSET;
3645 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3648 for (i = 0; i < bp->rx_nr_rings; i++) {
3649 rc = bnxt_init_one_rx_ring(bp, i);
3657 static int bnxt_init_tx_rings(struct bnxt *bp)
3661 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3662 BNXT_MIN_TX_DESC_CNT);
3664 for (i = 0; i < bp->tx_nr_rings; i++) {
3665 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3666 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3668 ring->fw_ring_id = INVALID_HW_RING_ID;
3674 static void bnxt_free_ring_grps(struct bnxt *bp)
3676 kfree(bp->grp_info);
3677 bp->grp_info = NULL;
3680 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3685 bp->grp_info = kcalloc(bp->cp_nr_rings,
3686 sizeof(struct bnxt_ring_grp_info),
3691 for (i = 0; i < bp->cp_nr_rings; i++) {
3693 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3694 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3695 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3696 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3697 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3702 static void bnxt_free_vnics(struct bnxt *bp)
3704 kfree(bp->vnic_info);
3705 bp->vnic_info = NULL;
3709 static int bnxt_alloc_vnics(struct bnxt *bp)
3713 #ifdef CONFIG_RFS_ACCEL
3714 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3715 num_vnics += bp->rx_nr_rings;
3718 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3721 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3726 bp->nr_vnics = num_vnics;
3730 static void bnxt_init_vnics(struct bnxt *bp)
3734 for (i = 0; i < bp->nr_vnics; i++) {
3735 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3738 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3739 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3740 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3742 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3744 if (bp->vnic_info[i].rss_hash_key) {
3746 prandom_bytes(vnic->rss_hash_key,
3749 memcpy(vnic->rss_hash_key,
3750 bp->vnic_info[0].rss_hash_key,
3756 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3760 pages = ring_size / desc_per_pg;
3767 while (pages & (pages - 1))
	pages++;	/* round the page count up to a power of 2 */
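/* Recompute the TPA-related flags from the current netdev features:
 * NETIF_F_LRO selects BNXT_FLAG_LRO and hardware GRO selects
 * BNXT_FLAG_GRO; both stay clear when aggregation rings are disabled.
 */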
3773 void bnxt_set_tpa_flags(struct bnxt *bp)
3775 bp->flags &= ~BNXT_FLAG_TPA;
3776 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3778 if (bp->dev->features & NETIF_F_LRO)
3779 bp->flags |= BNXT_FLAG_LRO;
3780 else if (bp->dev->features & NETIF_F_GRO_HW)
3781 bp->flags |= BNXT_FLAG_GRO;
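/* With TPA enabled, the aggregation ring is sized at up to 4x the RX
 * ring (agg_factor is capped at min(4, 64K / BNXT_RX_PAGE_SIZE)) and
 * may grow further for jumbo MTUs.  The completion ring must cover the
 * worst case: RX and TPA completions take two 16-byte slots each, so
 * it is sized at 2 * max_rx_cmpl + agg_ring_size + tx_ring_size.
 */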
3784 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
3787 void bnxt_set_ring_params(struct bnxt *bp)
3789 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3790 u32 agg_factor = 0, agg_ring_size = 0;
3792 /* 8 for CRC and VLAN */
3793 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3795 rx_space = rx_size + NET_SKB_PAD +
3796 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3798 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3799 ring_size = bp->rx_ring_size;
3800 bp->rx_agg_ring_size = 0;
3801 bp->rx_agg_nr_pages = 0;
3803 if (bp->flags & BNXT_FLAG_TPA)
3804 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3806 bp->flags &= ~BNXT_FLAG_JUMBO;
3807 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3810 bp->flags |= BNXT_FLAG_JUMBO;
3811 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3812 if (jumbo_factor > agg_factor)
3813 agg_factor = jumbo_factor;
3816 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3817 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3818 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3819 bp->rx_ring_size, ring_size);
3820 bp->rx_ring_size = ring_size;
3822 agg_ring_size = ring_size * agg_factor;
3824 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3826 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3827 u32 tmp = agg_ring_size;
3829 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3830 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3831 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3832 tmp, agg_ring_size);
3834 bp->rx_agg_ring_size = agg_ring_size;
3835 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3836 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3837 rx_space = rx_size + NET_SKB_PAD +
3838 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3841 bp->rx_buf_use_size = rx_size;
3842 bp->rx_buf_size = rx_space;
3844 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3845 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3847 ring_size = bp->tx_ring_size;
3848 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3849 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3851 max_rx_cmpl = bp->rx_ring_size;
3852 /* MAX TPA needs to be added because TPA_START completions are
3853 * immediately recycled, so the TPA completions are not bound by
 * the RX ring size.
 */
3856 if (bp->flags & BNXT_FLAG_TPA)
3857 max_rx_cmpl += bp->max_tpa;
3858 /* RX and TPA completions are 32-byte, all others are 16-byte */
3859 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3860 bp->cp_ring_size = ring_size;
3862 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3863 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3864 bp->cp_nr_pages = MAX_CP_PAGES;
3865 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3866 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3867 ring_size, bp->cp_ring_size);
3869 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3870 bp->cp_ring_mask = bp->cp_bit - 1;
3873 /* Changing allocation mode of RX rings.
3874 * TODO: Update when extending xdp_rxq_info to support allocation modes.
 */
3876 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3879 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3882 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3883 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3884 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3885 bp->rx_dir = DMA_BIDIRECTIONAL;
3886 bp->rx_skb_func = bnxt_rx_page_skb;
3887 /* Disable LRO or GRO_HW */
3888 netdev_update_features(bp->dev);
3890 bp->dev->max_mtu = bp->max_mtu;
3891 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3892 bp->rx_dir = DMA_FROM_DEVICE;
3893 bp->rx_skb_func = bnxt_rx_skb;
3898 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3901 struct bnxt_vnic_info *vnic;
3902 struct pci_dev *pdev = bp->pdev;
3907 for (i = 0; i < bp->nr_vnics; i++) {
3908 vnic = &bp->vnic_info[i];
3910 kfree(vnic->fw_grp_ids);
3911 vnic->fw_grp_ids = NULL;
3913 kfree(vnic->uc_list);
3914 vnic->uc_list = NULL;
3916 if (vnic->mc_list) {
3917 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3918 vnic->mc_list, vnic->mc_list_mapping);
3919 vnic->mc_list = NULL;
3922 if (vnic->rss_table) {
3923 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3925 vnic->rss_table_dma_addr);
3926 vnic->rss_table = NULL;
3929 vnic->rss_hash_key = NULL;
3934 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3936 int i, rc = 0, size;
3937 struct bnxt_vnic_info *vnic;
3938 struct pci_dev *pdev = bp->pdev;
3941 for (i = 0; i < bp->nr_vnics; i++) {
3942 vnic = &bp->vnic_info[i];
3944 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3945 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3948 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3949 if (!vnic->uc_list) {
3956 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3957 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3959 dma_alloc_coherent(&pdev->dev,
3961 &vnic->mc_list_mapping,
3963 if (!vnic->mc_list) {
3969 if (bp->flags & BNXT_FLAG_CHIP_P5)
3970 goto vnic_skip_grps;
3972 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3973 max_rings = bp->rx_nr_rings;
3977 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3978 if (!vnic->fw_grp_ids) {
3983 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3984 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3987 /* Allocate rss table and hash key */
3988 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3989 if (bp->flags & BNXT_FLAG_CHIP_P5)
3990 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3992 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3993 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3994 vnic->rss_table_size,
3995 &vnic->rss_table_dma_addr,
3997 if (!vnic->rss_table) {
4002 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4003 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4011 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4013 struct bnxt_hwrm_wait_token *token;
4015 dma_pool_destroy(bp->hwrm_dma_pool);
4016 bp->hwrm_dma_pool = NULL;
4019 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4020 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4024 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4026 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4028 BNXT_HWRM_DMA_ALIGN, 0);
4029 if (!bp->hwrm_dma_pool)
4032 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4037 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4039 kfree(stats->hw_masks);
4040 stats->hw_masks = NULL;
4041 kfree(stats->sw_stats);
4042 stats->sw_stats = NULL;
4043 if (stats->hw_stats) {
4044 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4045 stats->hw_stats_map);
4046 stats->hw_stats = NULL;
4050 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4053 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4054 &stats->hw_stats_map, GFP_KERNEL);
4055 if (!stats->hw_stats)
4058 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4059 if (!stats->sw_stats)
4063 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4064 if (!stats->hw_masks)
4070 bnxt_free_stats_mem(bp, stats);
4074 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4078 for (i = 0; i < count; i++)
	mask_arr[i] = mask;
4082 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4086 for (i = 0; i < count; i++)
4087 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4090 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4091 struct bnxt_stats_mem *stats)
4093 struct hwrm_func_qstats_ext_output *resp;
4094 struct hwrm_func_qstats_ext_input *req;
4098 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4099 !(bp->flags & BNXT_FLAG_CHIP_P5))
4102 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4106 req->fid = cpu_to_le16(0xffff);
4107 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4109 resp = hwrm_req_hold(bp, req);
4110 rc = hwrm_req_send(bp, req);
4112 hw_masks = &resp->rx_ucast_pkts;
4113 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4115 hwrm_req_drop(bp, req);
4119 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4120 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
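/* Seed the counter-rollover masks used when accumulating hardware
 * statistics.  If the firmware can report exact counter widths (the
 * COUNTER_MASK query flags), those masks are copied from the response;
 * otherwise fall back to fixed widths: 48-bit per-ring counters on P5
 * chips and 40-bit port counters.
 */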
4122 static void bnxt_init_stats(struct bnxt *bp)
4124 struct bnxt_napi *bnapi = bp->bnapi[0];
4125 struct bnxt_cp_ring_info *cpr;
4126 struct bnxt_stats_mem *stats;
4127 __le64 *rx_stats, *tx_stats;
4128 int rc, rx_count, tx_count;
4129 u64 *rx_masks, *tx_masks;
4133 cpr = &bnapi->cp_ring;
4134 stats = &cpr->stats;
4135 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4137 if (bp->flags & BNXT_FLAG_CHIP_P5)
4138 mask = (1ULL << 48) - 1;
4141 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4143 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4144 stats = &bp->port_stats;
4145 rx_stats = stats->hw_stats;
4146 rx_masks = stats->hw_masks;
4147 rx_count = sizeof(struct rx_port_stats) / 8;
4148 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4149 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4150 tx_count = sizeof(struct tx_port_stats) / 8;
4152 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4153 rc = bnxt_hwrm_port_qstats(bp, flags);
4155 mask = (1ULL << 40) - 1;
4157 bnxt_fill_masks(rx_masks, mask, rx_count);
4158 bnxt_fill_masks(tx_masks, mask, tx_count);
4160 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4161 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4162 bnxt_hwrm_port_qstats(bp, 0);
4165 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4166 stats = &bp->rx_port_stats_ext;
4167 rx_stats = stats->hw_stats;
4168 rx_masks = stats->hw_masks;
4169 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4170 stats = &bp->tx_port_stats_ext;
4171 tx_stats = stats->hw_stats;
4172 tx_masks = stats->hw_masks;
4173 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4175 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4176 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4178 mask = (1ULL << 40) - 1;
4180 bnxt_fill_masks(rx_masks, mask, rx_count);
4182 bnxt_fill_masks(tx_masks, mask, tx_count);
4184 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4186 bnxt_copy_hw_masks(tx_masks, tx_stats,
4188 bnxt_hwrm_port_qstats_ext(bp, 0);
4193 static void bnxt_free_port_stats(struct bnxt *bp)
4195 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4196 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4198 bnxt_free_stats_mem(bp, &bp->port_stats);
4199 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4200 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4203 static void bnxt_free_ring_stats(struct bnxt *bp)
4210 for (i = 0; i < bp->cp_nr_rings; i++) {
4211 struct bnxt_napi *bnapi = bp->bnapi[i];
4212 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4214 bnxt_free_stats_mem(bp, &cpr->stats);
4218 static int bnxt_alloc_stats(struct bnxt *bp)
4223 size = bp->hw_ring_stats_size;
4225 for (i = 0; i < bp->cp_nr_rings; i++) {
4226 struct bnxt_napi *bnapi = bp->bnapi[i];
4227 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4229 cpr->stats.len = size;
4230 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4234 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4237 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4240 if (bp->port_stats.hw_stats)
4241 goto alloc_ext_stats;
4243 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4244 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4248 bp->flags |= BNXT_FLAG_PORT_STATS;
4251 /* Display extended statistics only if FW supports it */
4252 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4253 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4256 if (bp->rx_port_stats_ext.hw_stats)
4257 goto alloc_tx_ext_stats;
4259 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4260 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4261 /* Extended stats are optional */
4266 if (bp->tx_port_stats_ext.hw_stats)
4269 if (bp->hwrm_spec_code >= 0x10902 ||
4270 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4271 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4272 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4273 /* Extended stats are optional */
4277 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4281 static void bnxt_clear_ring_indices(struct bnxt *bp)
4288 for (i = 0; i < bp->cp_nr_rings; i++) {
4289 struct bnxt_napi *bnapi = bp->bnapi[i];
4290 struct bnxt_cp_ring_info *cpr;
4291 struct bnxt_rx_ring_info *rxr;
4292 struct bnxt_tx_ring_info *txr;
4297 cpr = &bnapi->cp_ring;
4298 cpr->cp_raw_cons = 0;
4300 txr = bnapi->tx_ring;
4306 rxr = bnapi->rx_ring;
4309 rxr->rx_agg_prod = 0;
4310 rxr->rx_sw_agg_prod = 0;
4311 rxr->rx_next_cons = 0;
4316 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4318 #ifdef CONFIG_RFS_ACCEL
4321 /* Under rtnl_lock and all our NAPIs have been disabled. It's
4322 * safe to delete the hash table.
 */
4324 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4325 struct hlist_head *head;
4326 struct hlist_node *tmp;
4327 struct bnxt_ntuple_filter *fltr;
4329 head = &bp->ntp_fltr_hash_tbl[i];
4330 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4331 hlist_del(&fltr->hash);
4336 kfree(bp->ntp_fltr_bmap);
4337 bp->ntp_fltr_bmap = NULL;
4339 bp->ntp_fltr_count = 0;
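/* Allocate the aRFS n-tuple filter state: a hash table with
 * BNXT_NTP_FLTR_HASH_SIZE buckets and a bitmap used to hand out filter
 * IDs.  Only done when BNXT_FLAG_RFS is set.
 */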
4343 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4345 #ifdef CONFIG_RFS_ACCEL
4348 if (!(bp->flags & BNXT_FLAG_RFS))
4351 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4352 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4354 bp->ntp_fltr_count = 0;
4355 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4359 if (!bp->ntp_fltr_bmap)
4368 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4370 bnxt_free_vnic_attributes(bp);
4371 bnxt_free_tx_rings(bp);
4372 bnxt_free_rx_rings(bp);
4373 bnxt_free_cp_rings(bp);
4374 bnxt_free_all_cp_arrays(bp);
4375 bnxt_free_ntp_fltrs(bp, irq_re_init);
4377 bnxt_free_ring_stats(bp);
4378 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4379 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4380 bnxt_free_port_stats(bp);
4381 bnxt_free_ring_grps(bp);
4382 bnxt_free_vnics(bp);
4383 kfree(bp->tx_ring_map);
4384 bp->tx_ring_map = NULL;
4392 bnxt_clear_ring_indices(bp);
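/* Allocate all per-ring software state: the bnapi array, RX/TX ring
 * info arrays, stats memory, n-tuple filter table, VNICs and
 * completion-ring arrays, followed by the hardware rings themselves.
 * Any failure unwinds everything through bnxt_free_mem().
 */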
4396 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4398 int i, j, rc, size, arr_size;
4402 /* Allocate bnapi mem pointer array and mem block for
 * all queues
 */
4405 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4407 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4408 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4414 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4415 bp->bnapi[i] = bnapi;
4416 bp->bnapi[i]->index = i;
4417 bp->bnapi[i]->bp = bp;
4418 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4419 struct bnxt_cp_ring_info *cpr =
4420 &bp->bnapi[i]->cp_ring;
4422 cpr->cp_ring_struct.ring_mem.flags =
4423 BNXT_RMEM_RING_PTE_FLAG;
4427 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4428 sizeof(struct bnxt_rx_ring_info),
4433 for (i = 0; i < bp->rx_nr_rings; i++) {
4434 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4436 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4437 rxr->rx_ring_struct.ring_mem.flags =
4438 BNXT_RMEM_RING_PTE_FLAG;
4439 rxr->rx_agg_ring_struct.ring_mem.flags =
4440 BNXT_RMEM_RING_PTE_FLAG;
4442 rxr->bnapi = bp->bnapi[i];
4443 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4446 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4447 sizeof(struct bnxt_tx_ring_info),
4452 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4455 if (!bp->tx_ring_map)
4458 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4461 j = bp->rx_nr_rings;
4463 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4464 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4466 if (bp->flags & BNXT_FLAG_CHIP_P5)
4467 txr->tx_ring_struct.ring_mem.flags =
4468 BNXT_RMEM_RING_PTE_FLAG;
4469 txr->bnapi = bp->bnapi[j];
4470 bp->bnapi[j]->tx_ring = txr;
4471 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4472 if (i >= bp->tx_nr_rings_xdp) {
4473 txr->txq_index = i - bp->tx_nr_rings_xdp;
4474 bp->bnapi[j]->tx_int = bnxt_tx_int;
4476 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4477 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4481 rc = bnxt_alloc_stats(bp);
4484 bnxt_init_stats(bp);
4486 rc = bnxt_alloc_ntp_fltrs(bp);
4490 rc = bnxt_alloc_vnics(bp);
4495 rc = bnxt_alloc_all_cp_arrays(bp);
4499 bnxt_init_ring_struct(bp);
4501 rc = bnxt_alloc_rx_rings(bp);
4505 rc = bnxt_alloc_tx_rings(bp);
4509 rc = bnxt_alloc_cp_rings(bp);
4513 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4514 BNXT_VNIC_UCAST_FLAG;
4515 rc = bnxt_alloc_vnic_attributes(bp);
4521 bnxt_free_mem(bp, true);
4525 static void bnxt_disable_int(struct bnxt *bp)
4532 for (i = 0; i < bp->cp_nr_rings; i++) {
4533 struct bnxt_napi *bnapi = bp->bnapi[i];
4534 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4535 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4537 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4538 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4542 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4544 struct bnxt_napi *bnapi = bp->bnapi[n];
4545 struct bnxt_cp_ring_info *cpr;
4547 cpr = &bnapi->cp_ring;
4548 return cpr->cp_ring_struct.map_idx;
4551 static void bnxt_disable_int_sync(struct bnxt *bp)
4558 atomic_inc(&bp->intr_sem);
4560 bnxt_disable_int(bp);
4561 for (i = 0; i < bp->cp_nr_rings; i++) {
4562 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4564 synchronize_irq(bp->irq_tbl[map_idx].vector);
4568 static void bnxt_enable_int(struct bnxt *bp)
4572 atomic_set(&bp->intr_sem, 0);
4573 for (i = 0; i < bp->cp_nr_rings; i++) {
4574 struct bnxt_napi *bnapi = bp->bnapi[i];
4575 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4577 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
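/* Register the driver with the firmware: OS type, driver version, the
 * 256-bit bitmap of async events we want forwarded and, on a PF, the
 * list of VF HWRM commands that should be forwarded to the PF driver.
 * The response tells us whether the firmware supports IF_CHANGE.
 */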
4581 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4584 DECLARE_BITMAP(async_events_bmap, 256);
4585 u32 *events = (u32 *)async_events_bmap;
4586 struct hwrm_func_drv_rgtr_output *resp;
4587 struct hwrm_func_drv_rgtr_input *req;
4591 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4595 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4596 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4597 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4599 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4600 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4601 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4602 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4603 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4604 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4605 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4606 req->flags = cpu_to_le32(flags);
4607 req->ver_maj_8b = DRV_VER_MAJ;
4608 req->ver_min_8b = DRV_VER_MIN;
4609 req->ver_upd_8b = DRV_VER_UPD;
4610 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4611 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4612 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4618 memset(data, 0, sizeof(data));
4619 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4620 u16 cmd = bnxt_vf_req_snif[i];
4621 unsigned int bit, idx;
idx = cmd / 32;
bit = cmd % 32;
4625 data[idx] |= 1 << bit;
4628 for (i = 0; i < 8; i++)
4629 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4632 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4635 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4636 req->flags |= cpu_to_le32(
4637 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4639 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4640 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4641 u16 event_id = bnxt_async_events_arr[i];
4643 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4644 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4646 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4648 if (bmap && bmap_size) {
4649 for (i = 0; i < bmap_size; i++) {
4650 if (test_bit(i, bmap))
4651 __set_bit(i, async_events_bmap);
4654 for (i = 0; i < 8; i++)
4655 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4659 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4661 resp = hwrm_req_hold(bp, req);
4662 rc = hwrm_req_send(bp, req);
4664 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4666 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4667 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4669 hwrm_req_drop(bp, req);
4673 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4675 struct hwrm_func_drv_unrgtr_input *req;
4678 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4681 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4684 return hwrm_req_send(bp, req);
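/* Free the firmware-allocated VXLAN or GENEVE UDP destination port and
 * invalidate the cached firmware port id.
 */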
4687 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4689 struct hwrm_tunnel_dst_port_free_input *req;
4692 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4693 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4695 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4696 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4699 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4703 req->tunnel_type = tunnel_type;
4705 switch (tunnel_type) {
4706 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4707 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4709 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4711 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4712 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4714 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4720 rc = hwrm_req_send(bp, req);
4722 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4727 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4730 struct hwrm_tunnel_dst_port_alloc_output *resp;
4731 struct hwrm_tunnel_dst_port_alloc_input *req;
4734 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4738 req->tunnel_type = tunnel_type;
4739 req->tunnel_dst_port_val = port;
4741 resp = hwrm_req_hold(bp, req);
4742 rc = hwrm_req_send(bp, req);
4744 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4749 switch (tunnel_type) {
4750 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4751 bp->vxlan_port = port;
4752 bp->vxlan_fw_dst_port_id =
4753 le16_to_cpu(resp->tunnel_dst_port_id);
4755 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4756 bp->nge_port = port;
4757 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4764 hwrm_req_drop(bp, req);
4768 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4770 struct hwrm_cfa_l2_set_rx_mask_input *req;
4771 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4774 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4778 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4779 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4780 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4781 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4783 req->mask = cpu_to_le32(vnic->rx_mask);
4784 return hwrm_req_send_silent(bp, req);
4787 #ifdef CONFIG_RFS_ACCEL
4788 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4789 struct bnxt_ntuple_filter *fltr)
4791 struct hwrm_cfa_ntuple_filter_free_input *req;
4794 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4798 req->ntuple_filter_id = fltr->filter_id;
4799 return hwrm_req_send(bp, req);
4802 #define BNXT_NTP_FLTR_FLAGS \
4803 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4804 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4805 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4806 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4807 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4808 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4809 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4810 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4811 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4812 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4813 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4814 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4815 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4816 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4818 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4819 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
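/* Program an ntuple (aRFS) filter in firmware for the given flow.  The
 * destination is the RX ring index directly when firmware supports the
 * RFS ring table index capability, otherwise the per-ring VNIC.
 */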
4821 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4822 struct bnxt_ntuple_filter *fltr)
4824 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4825 struct hwrm_cfa_ntuple_filter_alloc_input *req;
4826 struct flow_keys *keys = &fltr->fkeys;
4827 struct bnxt_vnic_info *vnic;
4831 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4835 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4837 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4838 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4839 req->dst_id = cpu_to_le16(fltr->rxq);
4841 vnic = &bp->vnic_info[fltr->rxq + 1];
4842 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4844 req->flags = cpu_to_le32(flags);
4845 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4847 req->ethertype = htons(ETH_P_IP);
4848 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4849 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4850 req->ip_protocol = keys->basic.ip_proto;
4852 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4855 req->ethertype = htons(ETH_P_IPV6);
4857 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4858 *(struct in6_addr *)&req->src_ipaddr[0] =
4859 keys->addrs.v6addrs.src;
4860 *(struct in6_addr *)&req->dst_ipaddr[0] =
4861 keys->addrs.v6addrs.dst;
4862 for (i = 0; i < 4; i++) {
4863 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4864 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4867 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4868 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4869 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4870 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4872 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4873 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4875 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4878 req->src_port = keys->ports.src;
4879 req->src_port_mask = cpu_to_be16(0xffff);
4880 req->dst_port = keys->ports.dst;
4881 req->dst_port_mask = cpu_to_be16(0xffff);
4883 resp = hwrm_req_hold(bp, req);
4884 rc = hwrm_req_send(bp, req);
4886 fltr->filter_id = resp->ntuple_filter_id;
4887 hwrm_req_drop(bp, req);
4892 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4895 struct hwrm_cfa_l2_filter_alloc_output *resp;
4896 struct hwrm_cfa_l2_filter_alloc_input *req;
4899 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4903 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4904 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4906 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4907 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4909 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4910 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4911 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4912 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4913 req->l2_addr_mask[0] = 0xff;
4914 req->l2_addr_mask[1] = 0xff;
4915 req->l2_addr_mask[2] = 0xff;
4916 req->l2_addr_mask[3] = 0xff;
4917 req->l2_addr_mask[4] = 0xff;
4918 req->l2_addr_mask[5] = 0xff;
4920 resp = hwrm_req_hold(bp, req);
4921 rc = hwrm_req_send(bp, req);
4923 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4925 hwrm_req_drop(bp, req);
4929 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4931 struct hwrm_cfa_l2_filter_free_input *req;
4932 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4935 /* Any associated ntuple filters will also be cleared by firmware. */
4936 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4939 hwrm_req_hold(bp, req);
4940 for (i = 0; i < num_of_vnics; i++) {
4941 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4943 for (j = 0; j < vnic->uc_filter_count; j++) {
4944 req->l2_filter_id = vnic->fw_l2_filter_id[j];
4946 rc = hwrm_req_send(bp, req);
4948 vnic->uc_filter_count = 0;
4950 hwrm_req_drop(bp, req);
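/* Configure TPA (hardware LRO/GRO aggregation) on a VNIC, sizing the
 * maximum aggregation segments from the MTU and the RX page size.
 */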
4954 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4956 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4957 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4958 struct hwrm_vnic_tpa_cfg_input *req;
4961 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4964 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4969 u16 mss = bp->dev->mtu - 40;
4970 u32 nsegs, n, segs = 0, flags;
4972 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4973 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4974 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4975 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4976 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4977 if (tpa_flags & BNXT_FLAG_GRO)
4978 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4980 req->flags = cpu_to_le32(flags);
4983 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4984 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4985 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4987 /* The number of segs is in log2 units, and the first packet is not
4988 * included as part of these units.
4990 if (mss <= BNXT_RX_PAGE_SIZE) {
4991 n = BNXT_RX_PAGE_SIZE / mss;
4992 nsegs = (MAX_SKB_FRAGS - 1) * n;
4994 n = mss / BNXT_RX_PAGE_SIZE;
4995 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4997 nsegs = (MAX_SKB_FRAGS - n) / n;
5000 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5001 segs = MAX_TPA_SEGS_P5;
5002 max_aggs = bp->max_tpa;
5004 segs = ilog2(nsegs);
5006 req->max_agg_segs = cpu_to_le16(segs);
5007 req->max_aggs = cpu_to_le16(max_aggs);
5009 req->min_agg_len = cpu_to_le32(512);
5011 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5013 return hwrm_req_send(bp, req);
5016 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5018 struct bnxt_ring_grp_info *grp_info;
5020 grp_info = &bp->grp_info[ring->grp_idx];
5021 return grp_info->cp_fw_ring_id;
5024 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5026 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5027 struct bnxt_napi *bnapi = rxr->bnapi;
5028 struct bnxt_cp_ring_info *cpr;
5030 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5031 return cpr->cp_ring_struct.fw_ring_id;
5033 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5037 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5039 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5040 struct bnxt_napi *bnapi = txr->bnapi;
5041 struct bnxt_cp_ring_info *cpr;
5043 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5044 return cpr->cp_ring_struct.fw_ring_id;
5046 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
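/* Allocate the software RSS indirection table; P5 chips use a larger
 * table than the fixed HW_HASH_INDEX_SIZE used by older chips.
 */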
5050 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5054 if (bp->flags & BNXT_FLAG_CHIP_P5)
5055 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5057 entries = HW_HASH_INDEX_SIZE;
5059 bp->rss_indir_tbl_entries = entries;
5060 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5062 if (!bp->rss_indir_tbl)
5067 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5069 u16 max_rings, max_entries, pad, i;
5071 if (!bp->rx_nr_rings)
5074 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5075 max_rings = bp->rx_nr_rings - 1;
5077 max_rings = bp->rx_nr_rings;
5079 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5081 for (i = 0; i < max_entries; i++)
5082 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5084 pad = bp->rss_indir_tbl_entries - max_entries;
5086 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5089 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5091 u16 i, tbl_size, max_ring = 0;
5093 if (!bp->rss_indir_tbl)
5096 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5097 for (i = 0; i < tbl_size; i++)
5098 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
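/* Return the number of RSS contexts needed for the given RX ring count.
 * On P5 each context covers BNXT_RSS_TABLE_ENTRIES_P5 (64) entries, so
 * for example 65 RX rings require two contexts; older chips need a
 * single context (Nitro A0 is special-cased).
 */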
5102 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5104 if (bp->flags & BNXT_FLAG_CHIP_P5)
5105 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5106 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5111 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5113 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5116 /* Fill the RSS indirection table with ring group ids */
5117 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5119 j = bp->rss_indir_tbl[i];
5120 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
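/* P5 variant: each indirection entry is written as a pair of firmware
 * ring ids - the RX ring id followed by its completion ring id.
 */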
5124 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5125 struct bnxt_vnic_info *vnic)
5127 __le16 *ring_tbl = vnic->rss_table;
5128 struct bnxt_rx_ring_info *rxr;
5131 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5133 for (i = 0; i < tbl_size; i++) {
5136 j = bp->rss_indir_tbl[i];
5137 rxr = &bp->rx_ring[j];
5139 ring_id = rxr->rx_ring_struct.fw_ring_id;
5140 *ring_tbl++ = cpu_to_le16(ring_id);
5141 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5142 *ring_tbl++ = cpu_to_le16(ring_id);
5146 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5148 if (bp->flags & BNXT_FLAG_CHIP_P5)
5149 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5151 __bnxt_fill_hw_rss_tbl(bp, vnic);
5154 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5156 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5157 struct hwrm_vnic_rss_cfg_input *req;
5160 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5161 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5164 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5169 bnxt_fill_hw_rss_tbl(bp, vnic);
5170 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5171 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5172 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5173 req->hash_key_tbl_addr =
5174 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5176 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5177 return hwrm_req_send(bp, req);
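/* P5 variant of RSS configuration: one HWRM_VNIC_RSS_CFG request is
 * sent per ring table pair index, each with its own RSS context.
 */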
5180 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5182 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5183 struct hwrm_vnic_rss_cfg_input *req;
5184 dma_addr_t ring_tbl_map;
5188 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5192 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5194 return hwrm_req_send(bp, req);
5196 bnxt_fill_hw_rss_tbl(bp, vnic);
5197 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5198 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5199 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5200 ring_tbl_map = vnic->rss_table_dma_addr;
5201 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5203 hwrm_req_hold(bp, req);
5204 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5205 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5206 req->ring_table_pair_index = i;
5207 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5208 rc = hwrm_req_send(bp, req);
5214 hwrm_req_drop(bp, req);
5218 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5220 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5221 struct hwrm_vnic_plcmodes_cfg_input *req;
5224 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5228 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5229 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5230 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5232 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5233 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5234 /* thresholds not implemented in firmware yet */
5235 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5236 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5237 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5238 return hwrm_req_send(bp, req);
5241 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5244 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5246 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5249 req->rss_cos_lb_ctx_id =
5250 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5252 hwrm_req_send(bp, req);
5253 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5256 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5260 for (i = 0; i < bp->nr_vnics; i++) {
5261 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5263 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5264 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5265 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5268 bp->rsscos_nr_ctxs = 0;
5271 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5273 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5274 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5277 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5281 resp = hwrm_req_hold(bp, req);
5282 rc = hwrm_req_send(bp, req);
5284 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5285 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5286 hwrm_req_drop(bp, req);
5291 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5293 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5294 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5295 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5298 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5300 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5301 struct hwrm_vnic_cfg_input *req;
5302 unsigned int ring = 0, grp_idx;
5306 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5310 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5311 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5313 req->default_rx_ring_id =
5314 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5315 req->default_cmpl_ring_id =
5316 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5318 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5319 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5322 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5323 /* Only RSS is supported for now; COS & LB are TBD */
5324 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5325 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5326 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5327 VNIC_CFG_REQ_ENABLES_MRU);
5328 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5330 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5331 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5332 VNIC_CFG_REQ_ENABLES_MRU);
5333 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5335 req->rss_rule = cpu_to_le16(0xffff);
5338 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5339 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5340 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5341 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5343 req->cos_rule = cpu_to_le16(0xffff);
5346 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5348 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5350 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5351 ring = bp->rx_nr_rings - 1;
5353 grp_idx = bp->rx_ring[ring].bnapi->index;
5354 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5355 req->lb_rule = cpu_to_le16(0xffff);
5357 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5359 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5360 #ifdef CONFIG_BNXT_SRIOV
5362 def_vlan = bp->vf.vlan;
5364 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5365 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5366 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5367 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5369 return hwrm_req_send(bp, req);
5372 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5374 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5375 struct hwrm_vnic_free_input *req;
5377 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5381 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5383 hwrm_req_send(bp, req);
5384 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5388 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5392 for (i = 0; i < bp->nr_vnics; i++)
5393 bnxt_hwrm_vnic_free_one(bp, i);
5396 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5397 unsigned int start_rx_ring_idx,
5398 unsigned int nr_rings)
5400 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5401 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5402 struct hwrm_vnic_alloc_output *resp;
5403 struct hwrm_vnic_alloc_input *req;
5406 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5410 if (bp->flags & BNXT_FLAG_CHIP_P5)
5411 goto vnic_no_ring_grps;
5413 /* map ring groups to this vnic */
5414 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5415 grp_idx = bp->rx_ring[i].bnapi->index;
5416 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5417 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5421 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5425 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5426 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5428 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5430 resp = hwrm_req_hold(bp, req);
5431 rc = hwrm_req_send(bp, req);
5433 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5434 hwrm_req_drop(bp, req);
5438 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5440 struct hwrm_vnic_qcaps_output *resp;
5441 struct hwrm_vnic_qcaps_input *req;
5444 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5445 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5446 if (bp->hwrm_spec_code < 0x10600)
5449 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5453 resp = hwrm_req_hold(bp, req);
5454 rc = hwrm_req_send(bp, req);
5456 u32 flags = le32_to_cpu(resp->flags);
5458 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5459 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5460 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5462 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5463 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5465 /* Older P5 fw before EXT_HW_STATS support did not set
5466 * VLAN_STRIP_CAP properly.
5468 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5469 (BNXT_CHIP_P5_THOR(bp) &&
5470 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5471 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5472 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5473 if (bp->max_tpa_v2) {
5474 if (BNXT_CHIP_P5_THOR(bp))
5475 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5477 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5480 hwrm_req_drop(bp, req);
5484 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5486 struct hwrm_ring_grp_alloc_output *resp;
5487 struct hwrm_ring_grp_alloc_input *req;
5491 if (bp->flags & BNXT_FLAG_CHIP_P5)
5494 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5498 resp = hwrm_req_hold(bp, req);
5499 for (i = 0; i < bp->rx_nr_rings; i++) {
5500 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5502 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5503 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5504 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5505 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5507 rc = hwrm_req_send(bp, req);
5512 bp->grp_info[grp_idx].fw_grp_id =
5513 le32_to_cpu(resp->ring_group_id);
5515 hwrm_req_drop(bp, req);
5519 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5521 struct hwrm_ring_grp_free_input *req;
5524 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5527 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5530 hwrm_req_hold(bp, req);
5531 for (i = 0; i < bp->cp_nr_rings; i++) {
5532 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5534 req->ring_group_id =
5535 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5537 hwrm_req_send(bp, req);
5538 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5540 hwrm_req_drop(bp, req);
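/* Send HWRM_RING_ALLOC for a TX, RX, AGG, CMPL or NQ ring and record
 * the firmware ring id returned in the response.
 */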
5543 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5544 struct bnxt_ring_struct *ring,
5545 u32 ring_type, u32 map_index)
5547 struct hwrm_ring_alloc_output *resp;
5548 struct hwrm_ring_alloc_input *req;
5549 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5550 struct bnxt_ring_grp_info *grp_info;
5554 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5559 if (rmem->nr_pages > 1) {
5560 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5561 /* Page size is in log2 units */
5562 req->page_size = BNXT_PAGE_SHIFT;
5563 req->page_tbl_depth = 1;
5565 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5568 /* Association of ring index with doorbell index and MSIX number */
5569 req->logical_id = cpu_to_le16(map_index);
5571 switch (ring_type) {
5572 case HWRM_RING_ALLOC_TX: {
5573 struct bnxt_tx_ring_info *txr;
5575 txr = container_of(ring, struct bnxt_tx_ring_info,
5577 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5578 /* Association of transmit ring with completion ring */
5579 grp_info = &bp->grp_info[ring->grp_idx];
5580 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5581 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5582 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5583 req->queue_id = cpu_to_le16(ring->queue_id);
5586 case HWRM_RING_ALLOC_RX:
5587 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5588 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5589 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5592 /* Association of rx ring with stats context */
5593 grp_info = &bp->grp_info[ring->grp_idx];
5594 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5595 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5596 req->enables |= cpu_to_le32(
5597 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5598 if (NET_IP_ALIGN == 2)
5599 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5600 req->flags = cpu_to_le16(flags);
5603 case HWRM_RING_ALLOC_AGG:
5604 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5605 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5606 /* Association of agg ring with rx ring */
5607 grp_info = &bp->grp_info[ring->grp_idx];
5608 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5609 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5610 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5611 req->enables |= cpu_to_le32(
5612 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5613 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5615 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5617 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5619 case HWRM_RING_ALLOC_CMPL:
5620 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5621 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5622 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5623 /* Association of cp ring with nq */
5624 grp_info = &bp->grp_info[map_index];
5625 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5626 req->cq_handle = cpu_to_le64(ring->handle);
5627 req->enables |= cpu_to_le32(
5628 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5629 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5630 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5633 case HWRM_RING_ALLOC_NQ:
5634 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5635 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5636 if (bp->flags & BNXT_FLAG_USING_MSIX)
5637 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5640 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5645 resp = hwrm_req_hold(bp, req);
5646 rc = hwrm_req_send(bp, req);
5647 err = le16_to_cpu(resp->error_code);
5648 ring_id = le16_to_cpu(resp->ring_id);
5649 hwrm_req_drop(bp, req);
5653 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5654 ring_type, rc, err);
5657 ring->fw_ring_id = ring_id;
5661 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5666 struct hwrm_func_cfg_input *req;
5668 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5672 req->fid = cpu_to_le16(0xffff);
5673 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5674 req->async_event_cr = cpu_to_le16(idx);
5675 return hwrm_req_send(bp, req);
5677 struct hwrm_func_vf_cfg_input *req;
5679 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5684 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5685 req->async_event_cr = cpu_to_le16(idx);
5686 return hwrm_req_send(bp, req);
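/* Compute the doorbell address and key for a newly allocated ring.  P5
 * chips use a fixed PF/VF doorbell offset with a 64-bit key encoding the
 * ring XID; older chips use a per-ring doorbell at bar1 + map_idx * 0x80
 * with 32-bit keys.
 */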
5690 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5691 u32 map_idx, u32 xid)
5693 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5695 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5697 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5698 switch (ring_type) {
5699 case HWRM_RING_ALLOC_TX:
5700 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5702 case HWRM_RING_ALLOC_RX:
5703 case HWRM_RING_ALLOC_AGG:
5704 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5706 case HWRM_RING_ALLOC_CMPL:
5707 db->db_key64 = DBR_PATH_L2;
5709 case HWRM_RING_ALLOC_NQ:
5710 db->db_key64 = DBR_PATH_L2;
5713 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5715 db->doorbell = bp->bar1 + map_idx * 0x80;
5716 switch (ring_type) {
5717 case HWRM_RING_ALLOC_TX:
5718 db->db_key32 = DB_KEY_TX;
5720 case HWRM_RING_ALLOC_RX:
5721 case HWRM_RING_ALLOC_AGG:
5722 db->db_key32 = DB_KEY_RX;
5724 case HWRM_RING_ALLOC_CMPL:
5725 db->db_key32 = DB_KEY_CP;
5731 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5733 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5737 if (bp->flags & BNXT_FLAG_CHIP_P5)
5738 type = HWRM_RING_ALLOC_NQ;
5740 type = HWRM_RING_ALLOC_CMPL;
5741 for (i = 0; i < bp->cp_nr_rings; i++) {
5742 struct bnxt_napi *bnapi = bp->bnapi[i];
5743 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5744 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5745 u32 map_idx = ring->map_idx;
5746 unsigned int vector;
5748 vector = bp->irq_tbl[map_idx].vector;
5749 disable_irq_nosync(vector);
5750 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5755 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5756 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5758 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5761 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5763 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5767 type = HWRM_RING_ALLOC_TX;
5768 for (i = 0; i < bp->tx_nr_rings; i++) {
5769 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5770 struct bnxt_ring_struct *ring;
5773 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5774 struct bnxt_napi *bnapi = txr->bnapi;
5775 struct bnxt_cp_ring_info *cpr, *cpr2;
5776 u32 type2 = HWRM_RING_ALLOC_CMPL;
5778 cpr = &bnapi->cp_ring;
5779 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5780 ring = &cpr2->cp_ring_struct;
5781 ring->handle = BNXT_TX_HDL;
5782 map_idx = bnapi->index;
5783 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5786 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5788 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5790 ring = &txr->tx_ring_struct;
5792 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5795 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5798 type = HWRM_RING_ALLOC_RX;
5799 for (i = 0; i < bp->rx_nr_rings; i++) {
5800 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5801 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5802 struct bnxt_napi *bnapi = rxr->bnapi;
5803 u32 map_idx = bnapi->index;
5805 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5808 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5809 /* If we have agg rings, post agg buffers first. */
5811 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5812 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5813 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5814 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5815 u32 type2 = HWRM_RING_ALLOC_CMPL;
5816 struct bnxt_cp_ring_info *cpr2;
5818 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5819 ring = &cpr2->cp_ring_struct;
5820 ring->handle = BNXT_RX_HDL;
5821 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5824 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5826 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5831 type = HWRM_RING_ALLOC_AGG;
5832 for (i = 0; i < bp->rx_nr_rings; i++) {
5833 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5834 struct bnxt_ring_struct *ring =
5835 &rxr->rx_agg_ring_struct;
5836 u32 grp_idx = ring->grp_idx;
5837 u32 map_idx = grp_idx + bp->rx_nr_rings;
5839 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5843 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5845 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5846 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5847 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5854 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5855 struct bnxt_ring_struct *ring,
5856 u32 ring_type, int cmpl_ring_id)
5858 struct hwrm_ring_free_output *resp;
5859 struct hwrm_ring_free_input *req;
5863 if (BNXT_NO_FW_ACCESS(bp))
5866 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5870 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5871 req->ring_type = ring_type;
5872 req->ring_id = cpu_to_le16(ring->fw_ring_id);
5874 resp = hwrm_req_hold(bp, req);
5875 rc = hwrm_req_send(bp, req);
5876 error_code = le16_to_cpu(resp->error_code);
5877 hwrm_req_drop(bp, req);
5879 if (rc || error_code) {
5880 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5881 ring_type, rc, error_code);
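/* Free all firmware rings: TX, RX and AGG rings first, then interrupts
 * are quiesced before the completion and NQ rings are freed.
 */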
5887 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5895 for (i = 0; i < bp->tx_nr_rings; i++) {
5896 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5897 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5899 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5900 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5902 hwrm_ring_free_send_msg(bp, ring,
5903 RING_FREE_REQ_RING_TYPE_TX,
5904 close_path ? cmpl_ring_id :
5905 INVALID_HW_RING_ID);
5906 ring->fw_ring_id = INVALID_HW_RING_ID;
5910 for (i = 0; i < bp->rx_nr_rings; i++) {
5911 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5912 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5913 u32 grp_idx = rxr->bnapi->index;
5915 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5916 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5918 hwrm_ring_free_send_msg(bp, ring,
5919 RING_FREE_REQ_RING_TYPE_RX,
5920 close_path ? cmpl_ring_id :
5921 INVALID_HW_RING_ID);
5922 ring->fw_ring_id = INVALID_HW_RING_ID;
5923 bp->grp_info[grp_idx].rx_fw_ring_id =
5928 if (bp->flags & BNXT_FLAG_CHIP_P5)
5929 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5931 type = RING_FREE_REQ_RING_TYPE_RX;
5932 for (i = 0; i < bp->rx_nr_rings; i++) {
5933 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5934 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5935 u32 grp_idx = rxr->bnapi->index;
5937 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5938 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5940 hwrm_ring_free_send_msg(bp, ring, type,
5941 close_path ? cmpl_ring_id :
5942 INVALID_HW_RING_ID);
5943 ring->fw_ring_id = INVALID_HW_RING_ID;
5944 bp->grp_info[grp_idx].agg_fw_ring_id =
5949 /* The completion rings are about to be freed. After that the
5950 * IRQ doorbell will not work anymore. So we need to disable the IRQ here.
5953 bnxt_disable_int_sync(bp);
5955 if (bp->flags & BNXT_FLAG_CHIP_P5)
5956 type = RING_FREE_REQ_RING_TYPE_NQ;
5958 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5959 for (i = 0; i < bp->cp_nr_rings; i++) {
5960 struct bnxt_napi *bnapi = bp->bnapi[i];
5961 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5962 struct bnxt_ring_struct *ring;
5965 for (j = 0; j < 2; j++) {
5966 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5969 ring = &cpr2->cp_ring_struct;
5970 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5972 hwrm_ring_free_send_msg(bp, ring,
5973 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5974 INVALID_HW_RING_ID);
5975 ring->fw_ring_id = INVALID_HW_RING_ID;
5978 ring = &cpr->cp_ring_struct;
5979 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5980 hwrm_ring_free_send_msg(bp, ring, type,
5981 INVALID_HW_RING_ID);
5982 ring->fw_ring_id = INVALID_HW_RING_ID;
5983 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5988 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, bool shared);
5991 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5993 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5994 struct hwrm_func_qcfg_output *resp;
5995 struct hwrm_func_qcfg_input *req;
5998 if (bp->hwrm_spec_code < 0x10601)
6001 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6005 req->fid = cpu_to_le16(0xffff);
6006 resp = hwrm_req_hold(bp, req);
6007 rc = hwrm_req_send(bp, req);
6009 hwrm_req_drop(bp, req);
6013 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6014 if (BNXT_NEW_RM(bp)) {
6017 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6018 hw_resc->resv_hw_ring_grps =
6019 le32_to_cpu(resp->alloc_hw_ring_grps);
6020 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6021 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6022 stats = le16_to_cpu(resp->alloc_stat_ctx);
6023 hw_resc->resv_irqs = cp;
6024 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6025 int rx = hw_resc->resv_rx_rings;
6026 int tx = hw_resc->resv_tx_rings;
6028 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6030 if (cp < (rx + tx)) {
6031 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6032 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6034 hw_resc->resv_rx_rings = rx;
6035 hw_resc->resv_tx_rings = tx;
6037 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6038 hw_resc->resv_hw_ring_grps = rx;
6040 hw_resc->resv_cp_rings = cp;
6041 hw_resc->resv_stat_ctxs = stats;
6043 hwrm_req_drop(bp, req);
6047 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6049 struct hwrm_func_qcfg_output *resp;
6050 struct hwrm_func_qcfg_input *req;
6053 if (bp->hwrm_spec_code < 0x10601)
6056 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6060 req->fid = cpu_to_le16(fid);
6061 resp = hwrm_req_hold(bp, req);
6062 rc = hwrm_req_send(bp, req);
6064 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6066 hwrm_req_drop(bp, req);
6070 static bool bnxt_rfs_supported(struct bnxt *bp);
6072 static struct hwrm_func_cfg_input *
6073 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6074 int ring_grps, int cp_rings, int stats, int vnics)
6076 struct hwrm_func_cfg_input *req;
6079 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6082 req->fid = cpu_to_le16(0xffff);
6083 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6084 req->num_tx_rings = cpu_to_le16(tx_rings);
6085 if (BNXT_NEW_RM(bp)) {
6086 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6087 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6088 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6089 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6090 enables |= tx_rings + ring_grps ?
6091 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6092 enables |= rx_rings ?
6093 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6095 enables |= cp_rings ?
6096 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6097 enables |= ring_grps ?
6098 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6099 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6101 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6103 req->num_rx_rings = cpu_to_le16(rx_rings);
6104 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6105 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6106 req->num_msix = cpu_to_le16(cp_rings);
6107 req->num_rsscos_ctxs =
6108 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6110 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6111 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6112 req->num_rsscos_ctxs = cpu_to_le16(1);
6113 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6114 bnxt_rfs_supported(bp))
6115 req->num_rsscos_ctxs =
6116 cpu_to_le16(ring_grps + 1);
6118 req->num_stat_ctxs = cpu_to_le16(stats);
6119 req->num_vnics = cpu_to_le16(vnics);
6121 req->enables = cpu_to_le32(enables);
6125 static struct hwrm_func_vf_cfg_input *
6126 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6127 int ring_grps, int cp_rings, int stats, int vnics)
6129 struct hwrm_func_vf_cfg_input *req;
6132 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6135 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6136 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6137 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6138 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6139 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6140 enables |= tx_rings + ring_grps ?
6141 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6143 enables |= cp_rings ?
6144 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6145 enables |= ring_grps ?
6146 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6148 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6149 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6151 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6152 req->num_tx_rings = cpu_to_le16(tx_rings);
6153 req->num_rx_rings = cpu_to_le16(rx_rings);
6154 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6155 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6156 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6158 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6159 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6160 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6162 req->num_stat_ctxs = cpu_to_le16(stats);
6163 req->num_vnics = cpu_to_le16(vnics);
6165 req->enables = cpu_to_le32(enables);
6170 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6171 int ring_grps, int cp_rings, int stats, int vnics)
6173 struct hwrm_func_cfg_input *req;
6176 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6177 cp_rings, stats, vnics);
6181 if (!req->enables) {
6182 hwrm_req_drop(bp, req);
6186 rc = hwrm_req_send(bp, req);
6190 if (bp->hwrm_spec_code < 0x10601)
6191 bp->hw_resc.resv_tx_rings = tx_rings;
6193 return bnxt_hwrm_get_rings(bp);
6197 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6198 int ring_grps, int cp_rings, int stats, int vnics)
6200 struct hwrm_func_vf_cfg_input *req;
6203 if (!BNXT_NEW_RM(bp)) {
6204 bp->hw_resc.resv_tx_rings = tx_rings;
6208 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6209 cp_rings, stats, vnics);
6213 rc = hwrm_req_send(bp, req);
6217 return bnxt_hwrm_get_rings(bp);
6220 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6221 int cp, int stat, int vnic)
6224 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6227 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6231 int bnxt_nq_rings_in_use(struct bnxt *bp)
6233 int cp = bp->cp_nr_rings;
6234 int ulp_msix, ulp_base;
6236 ulp_msix = bnxt_get_ulp_msix_num(bp);
6238 ulp_base = bnxt_get_ulp_msix_base(bp);
6240 if ((ulp_base + ulp_msix) > cp)
6241 cp = ulp_base + ulp_msix;
6246 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6250 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6251 return bnxt_nq_rings_in_use(bp);
6253 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6257 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6259 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6260 int cp = bp->cp_nr_rings;
6265 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6266 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6268 return cp + ulp_stat;
6271 /* Check if a default RSS map needs to be set up. This function is only
6272 * used on older firmware that does not require reserving RX rings.
6274 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6276 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6278 /* The RSS map is valid for RX rings set to resv_rx_rings */
6279 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6280 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6281 if (!netif_is_rxfh_configured(bp->dev))
6282 bnxt_set_dflt_rss_indir_tbl(bp);
6286 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6288 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6289 int cp = bnxt_cp_rings_in_use(bp);
6290 int nq = bnxt_nq_rings_in_use(bp);
6291 int rx = bp->rx_nr_rings, stat;
6292 int vnic = 1, grp = rx;
6294 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6295 bp->hwrm_spec_code >= 0x10601)
6298 /* Old firmware does not need RX ring reservations, but we still
6299 * need to set up a default RSS map when needed. With new firmware
6300 * we go through RX ring reservations first and then set up the
6301 * RSS map for the successfully reserved RX rings when needed.
6303 if (!BNXT_NEW_RM(bp)) {
6304 bnxt_check_rss_tbl_no_rmgr(bp);
6307 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6309 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6311 stat = bnxt_get_func_stat_ctxs(bp);
6312 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6313 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6314 (hw_resc->resv_hw_ring_grps != grp &&
6315 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6317 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6318 hw_resc->resv_irqs != nq)
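/* Reserve rings, ring groups, stat contexts and VNICs with firmware and
 * trim the driver's ring counts down to what was actually granted.
 */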
6323 static int __bnxt_reserve_rings(struct bnxt *bp)
6325 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6326 int cp = bnxt_nq_rings_in_use(bp);
6327 int tx = bp->tx_nr_rings;
6328 int rx = bp->rx_nr_rings;
6329 int grp, rx_rings, rc;
6333 if (!bnxt_need_reserve_rings(bp))
6336 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6338 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6340 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6342 grp = bp->rx_nr_rings;
6343 stat = bnxt_get_func_stat_ctxs(bp);
6345 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6349 tx = hw_resc->resv_tx_rings;
6350 if (BNXT_NEW_RM(bp)) {
6351 rx = hw_resc->resv_rx_rings;
6352 cp = hw_resc->resv_irqs;
6353 grp = hw_resc->resv_hw_ring_grps;
6354 vnic = hw_resc->resv_vnics;
6355 stat = hw_resc->resv_stat_ctxs;
6359 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6363 if (netif_running(bp->dev))
6366 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6367 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6368 bp->dev->hw_features &= ~NETIF_F_LRO;
6369 bp->dev->features &= ~NETIF_F_LRO;
6370 bnxt_set_ring_params(bp);
6373 rx_rings = min_t(int, rx_rings, grp);
6374 cp = min_t(int, cp, bp->cp_nr_rings);
6375 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6376 stat -= bnxt_get_ulp_stat_ctxs(bp);
6377 cp = min_t(int, cp, stat);
6378 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6379 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6381 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6382 bp->tx_nr_rings = tx;
6384 /* If we cannot reserve all the RX rings, reset the RSS map only
6385 * if absolutely necessary
6387 if (rx_rings != bp->rx_nr_rings) {
6388 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6389 rx_rings, bp->rx_nr_rings);
6390 if (netif_is_rxfh_configured(bp->dev) &&
6391 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6392 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6393 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6394 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6395 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6398 bp->rx_nr_rings = rx_rings;
6399 bp->cp_nr_rings = cp;
6401 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6404 if (!netif_is_rxfh_configured(bp->dev))
6405 bnxt_set_dflt_rss_indir_tbl(bp);
6410 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6411 int ring_grps, int cp_rings, int stats,
6414 struct hwrm_func_vf_cfg_input *req;
6417 if (!BNXT_NEW_RM(bp))
6420 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6421 cp_rings, stats, vnics);
6422 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6423 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6424 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6425 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6426 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6427 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6428 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6429 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6431 req->flags = cpu_to_le32(flags);
6432 return hwrm_req_send_silent(bp, req);
6435 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6436 int ring_grps, int cp_rings, int stats,
6439 struct hwrm_func_cfg_input *req;
6442 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6443 cp_rings, stats, vnics);
6444 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6445 if (BNXT_NEW_RM(bp)) {
6446 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6447 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6448 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6449 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6450 if (bp->flags & BNXT_FLAG_CHIP_P5)
6451 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6452 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6454 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6457 req->flags = cpu_to_le32(flags);
6458 return hwrm_req_send_silent(bp, req);
6461 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6462 int ring_grps, int cp_rings, int stats,
6465 if (bp->hwrm_spec_code < 0x10801)
6469 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6470 ring_grps, cp_rings, stats,
6473 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6474 cp_rings, stats, vnics);
6477 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6479 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6480 struct hwrm_ring_aggint_qcaps_output *resp;
6481 struct hwrm_ring_aggint_qcaps_input *req;
6484 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6485 coal_cap->num_cmpl_dma_aggr_max = 63;
6486 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6487 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6488 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6489 coal_cap->int_lat_tmr_min_max = 65535;
6490 coal_cap->int_lat_tmr_max_max = 65535;
6491 coal_cap->num_cmpl_aggr_int_max = 65535;
6492 coal_cap->timer_units = 80;
6494 if (bp->hwrm_spec_code < 0x10902)
6497 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6500 resp = hwrm_req_hold(bp, req);
6501 rc = hwrm_req_send_silent(bp, req);
6503 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6504 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6505 coal_cap->num_cmpl_dma_aggr_max =
6506 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6507 coal_cap->num_cmpl_dma_aggr_during_int_max =
6508 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6509 coal_cap->cmpl_aggr_dma_tmr_max =
6510 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6511 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6512 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6513 coal_cap->int_lat_tmr_min_max =
6514 le16_to_cpu(resp->int_lat_tmr_min_max);
6515 coal_cap->int_lat_tmr_max_max =
6516 le16_to_cpu(resp->int_lat_tmr_max_max);
6517 coal_cap->num_cmpl_aggr_int_max =
6518 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6519 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6521 hwrm_req_drop(bp, req);
6524 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6526 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6528 return usec * 1000 / coal_cap->timer_units;
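/* Example for the conversion above: with the default timer_units of 80,
 * a 100 usec coalescing tick converts to 100 * 1000 / 80 = 1250 device
 * timer units.
 */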
6531 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6532 struct bnxt_coal *hw_coal,
6533 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6535 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6536 u16 val, tmr, max, flags = hw_coal->flags;
6537 u32 cmpl_params = coal_cap->cmpl_params;
6539 max = hw_coal->bufs_per_record * 128;
6540 if (hw_coal->budget)
6541 max = hw_coal->bufs_per_record * hw_coal->budget;
6542 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6544 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6545 req->num_cmpl_aggr_int = cpu_to_le16(val);
6547 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6548 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6550 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6551 coal_cap->num_cmpl_dma_aggr_during_int_max);
6552 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6554 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6555 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6556 req->int_lat_tmr_max = cpu_to_le16(tmr);
6558 /* min timer set to 1/2 of interrupt timer */
6559 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6561 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6562 req->int_lat_tmr_min = cpu_to_le16(val);
6563 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6566 /* buf timer set to 1/4 of interrupt timer */
6567 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6568 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6571 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6572 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6573 val = clamp_t(u16, tmr, 1,
6574 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6575 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6577 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6580 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6581 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6582 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6583 req->flags = cpu_to_le16(flags);
6584 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6587 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6588 struct bnxt_coal *hw_coal)
6590 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6591 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6592 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6593 u32 nq_params = coal_cap->nq_params;
6597 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6600 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6604 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6606 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6608 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6609 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6610 req->int_lat_tmr_min = cpu_to_le16(tmr);
6611 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6612 return hwrm_req_send(bp, req);
6615 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6617 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6618 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6619 struct bnxt_coal coal;
6622 /* Tick values in microseconds.
6623 * 1 coal_buf x bufs_per_record = 1 completion record.
6625 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6627 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6628 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6630 if (!bnapi->rx_ring)
6633 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6637 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6639 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6641 return hwrm_req_send(bp, req_rx);
6644 int bnxt_hwrm_set_coal(struct bnxt *bp)
6646 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6650 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6654 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6656 hwrm_req_drop(bp, req_rx);
6660 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6661 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6663 hwrm_req_hold(bp, req_rx);
6664 hwrm_req_hold(bp, req_tx);
6665 for (i = 0; i < bp->cp_nr_rings; i++) {
6666 struct bnxt_napi *bnapi = bp->bnapi[i];
6667 struct bnxt_coal *hw_coal;
6671 if (!bnapi->rx_ring) {
6672 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6675 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6677 req->ring_id = cpu_to_le16(ring_id);
6679 rc = hwrm_req_send(bp, req);
6683 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6686 if (bnapi->rx_ring && bnapi->tx_ring) {
6688 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6689 req->ring_id = cpu_to_le16(ring_id);
6690 rc = hwrm_req_send(bp, req);
6695 hw_coal = &bp->rx_coal;
6697 hw_coal = &bp->tx_coal;
6698 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6700 hwrm_req_drop(bp, req_rx);
6701 hwrm_req_drop(bp, req_tx);
6705 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6707 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6708 struct hwrm_stat_ctx_free_input *req;
6714 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6717 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6719 if (BNXT_FW_MAJ(bp) <= 20) {
6720 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6721 hwrm_req_drop(bp, req);
6724 hwrm_req_hold(bp, req0);
6726 hwrm_req_hold(bp, req);
6727 for (i = 0; i < bp->cp_nr_rings; i++) {
6728 struct bnxt_napi *bnapi = bp->bnapi[i];
6729 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6731 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6732 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6734 req0->stat_ctx_id = req->stat_ctx_id;
6735 hwrm_req_send(bp, req0);
6737 hwrm_req_send(bp, req);
6739 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6742 hwrm_req_drop(bp, req);
6744 hwrm_req_drop(bp, req0);
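/* Allocate one firmware statistics context per completion ring and point
 * it at that ring's DMA-mapped statistics buffer.
 */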
6747 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6749 struct hwrm_stat_ctx_alloc_output *resp;
6750 struct hwrm_stat_ctx_alloc_input *req;
6753 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6756 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6760 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6761 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6763 resp = hwrm_req_hold(bp, req);
6764 for (i = 0; i < bp->cp_nr_rings; i++) {
6765 struct bnxt_napi *bnapi = bp->bnapi[i];
6766 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6768 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6770 rc = hwrm_req_send(bp, req);
6774 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6776 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6778 hwrm_req_drop(bp, req);
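/* One statistics context is allocated per completion ring; firmware updates
 * the counters at cpr->stats.hw_stats_map roughly every
 * stats_coal_ticks / 1000 ms (update_period_ms above). On free, firmware
 * older than major version 20 is first sent STAT_CTX_CLR_STATS so stale
 * counters are not carried over (see bnxt_hwrm_stat_ctx_free() above).
 */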
6782 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6784 struct hwrm_func_qcfg_output *resp;
6785 struct hwrm_func_qcfg_input *req;
6786 u32 min_db_offset = 0;
6790 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6794 req->fid = cpu_to_le16(0xffff);
6795 resp = hwrm_req_hold(bp, req);
6796 rc = hwrm_req_send(bp, req);
6798 goto func_qcfg_exit;
6800 #ifdef CONFIG_BNXT_SRIOV
6802 struct bnxt_vf_info *vf = &bp->vf;
6804 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6806 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6809 flags = le16_to_cpu(resp->flags);
6810 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6811 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6812 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6813 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6814 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6816 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6817 bp->flags |= BNXT_FLAG_MULTI_HOST;
6818 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6819 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6821 switch (resp->port_partition_type) {
6822 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6823 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6824 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6825 bp->port_partition_type = resp->port_partition_type;
6828 if (bp->hwrm_spec_code < 0x10707 ||
6829 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6830 bp->br_mode = BRIDGE_MODE_VEB;
6831 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6832 bp->br_mode = BRIDGE_MODE_VEPA;
6834 bp->br_mode = BRIDGE_MODE_UNDEF;
6836 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6838 bp->max_mtu = BNXT_MAX_MTU;
6841 goto func_qcfg_exit;
6843 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6845 min_db_offset = DB_PF_OFFSET_P5;
6847 min_db_offset = DB_VF_OFFSET_P5;
6849 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6851 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6852 bp->db_size <= min_db_offset)
6853 bp->db_size = pci_resource_len(bp->pdev, 2);
6856 hwrm_req_drop(bp, req);
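/* HWRM_FUNC_QCFG reports per-function configuration: the VF VLAN and the
 * PF's registered VF count, firmware LLDP/DCBX agent presence, multi-host
 * and ring-monitor capability, the NPAR partition type, the EVB (bridge)
 * mode, the configured maximum MTU, and on P5 chips the L2 doorbell BAR
 * size used to derive bp->db_size.
 */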
6860 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6861 struct hwrm_func_backing_store_qcaps_output *resp)
6863 struct bnxt_mem_init *mem_init;
6869 init_val = resp->ctx_kind_initializer;
6870 init_mask = le16_to_cpu(resp->ctx_init_mask);
6871 offset = &resp->qp_init_offset;
6872 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6873 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6874 mem_init->init_val = init_val;
6875 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6878 if (i == BNXT_CTX_MEM_INIT_STAT)
6879 offset = &resp->stat_init_offset;
6880 if (init_mask & (1 << i))
6881 mem_init->offset = *offset * 4;
6883 else mem_init->init_val = 0;
6885 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6886 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6887 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6888 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6889 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6890 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
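/* Context-memory initialization metadata: ctx_kind_initializer is the byte
 * value firmware expects in freshly allocated backing-store pages,
 * ctx_init_mask says which memory types need it, and the per-type
 * *_init_offset fields are expressed in 4-byte units (hence the "* 4"
 * above). Types whose mask bit is clear keep BNXT_MEM_INVALID_OFFSET and an
 * init value of 0; the recorded entry sizes are reused when the pages are
 * later initialized.
 */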
6893 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6895 struct hwrm_func_backing_store_qcaps_output *resp;
6896 struct hwrm_func_backing_store_qcaps_input *req;
6899 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6902 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6906 resp = hwrm_req_hold(bp, req);
6907 rc = hwrm_req_send_silent(bp, req);
6909 struct bnxt_ctx_pg_info *ctx_pg;
6910 struct bnxt_ctx_mem_info *ctx;
6913 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6918 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6919 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6920 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6921 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6922 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6923 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6924 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6925 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6926 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6927 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6928 ctx->vnic_max_vnic_entries =
6929 le16_to_cpu(resp->vnic_max_vnic_entries);
6930 ctx->vnic_max_ring_table_entries =
6931 le16_to_cpu(resp->vnic_max_ring_table_entries);
6932 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6933 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6934 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6935 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6936 ctx->tqm_min_entries_per_ring =
6937 le32_to_cpu(resp->tqm_min_entries_per_ring);
6938 ctx->tqm_max_entries_per_ring =
6939 le32_to_cpu(resp->tqm_max_entries_per_ring);
6940 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6941 if (!ctx->tqm_entries_multiple)
6942 ctx->tqm_entries_multiple = 1;
6943 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6944 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6945 ctx->mrav_num_entries_units =
6946 le16_to_cpu(resp->mrav_num_entries_units);
6947 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6948 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6950 bnxt_init_ctx_initializer(ctx, resp);
6952 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6953 if (!ctx->tqm_fp_rings_count)
6954 ctx->tqm_fp_rings_count = bp->max_q;
6955 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6956 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6958 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6959 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6965 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6966 ctx->tqm_mem[i] = ctx_pg;
6972 hwrm_req_drop(bp, req);
6976 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6979 if (!rmem->nr_pages)
6982 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6983 if (rmem->depth >= 1) {
6984 if (rmem->depth == 2)
6988 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6990 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
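/* Page attribute/dir encoding (sketch): BNXT_SET_CTX_PAGE_ATTR() fills in
 * the page-size attribute, and the depth appears to be encoded into
 * *pg_attr as well (those assignments are not shown in this excerpt). For
 * one- or two-level tables the directory pointer is the page-table DMA
 * address (rmem->pg_tbl_map); otherwise it is the single data page
 * (rmem->dma_arr[0]).
 */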
6994 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6995 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6996 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6997 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6998 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6999 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7001 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7003 struct hwrm_func_backing_store_cfg_input *req;
7004 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7005 struct bnxt_ctx_pg_info *ctx_pg;
7006 void **__req = (void **)&req;
7007 u32 req_len = sizeof(*req);
7008 __le32 *num_entries;
7019 if (req_len > bp->hwrm_max_ext_req_len)
7020 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7021 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7025 req->enables = cpu_to_le32(enables);
7026 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7027 ctx_pg = &ctx->qp_mem;
7028 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7029 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7030 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7031 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7032 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7033 &req->qpc_pg_size_qpc_lvl,
7034 &req->qpc_page_dir);
7036 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7037 ctx_pg = &ctx->srq_mem;
7038 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7039 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7040 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7041 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7042 &req->srq_pg_size_srq_lvl,
7043 &req->srq_page_dir);
7045 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7046 ctx_pg = &ctx->cq_mem;
7047 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7048 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7049 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7050 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7051 &req->cq_pg_size_cq_lvl, &req->cq_page_dir);
7054 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7055 ctx_pg = &ctx->vnic_mem;
7056 req->vnic_num_vnic_entries =
7057 cpu_to_le16(ctx->vnic_max_vnic_entries);
7058 req->vnic_num_ring_table_entries =
7059 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7060 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7061 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7062 &req->vnic_pg_size_vnic_lvl,
7063 &req->vnic_page_dir);
7065 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7066 ctx_pg = &ctx->stat_mem;
7067 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7068 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7069 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7070 &req->stat_pg_size_stat_lvl,
7071 &req->stat_page_dir);
7073 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7074 ctx_pg = &ctx->mrav_mem;
7075 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7076 if (ctx->mrav_num_entries_units)
7078 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7079 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7080 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7081 &req->mrav_pg_size_mrav_lvl,
7082 &req->mrav_page_dir);
7084 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7085 ctx_pg = &ctx->tim_mem;
7086 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7087 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7088 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7089 &req->tim_pg_size_tim_lvl,
7090 &req->tim_page_dir);
7092 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7093 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7094 pg_dir = &req->tqm_sp_page_dir,
7095 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7096 i < BNXT_MAX_TQM_RINGS;
7097 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7098 if (!(enables & ena)) continue;
7101 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7102 ctx_pg = ctx->tqm_mem[i];
7103 *num_entries = cpu_to_le32(ctx_pg->entries);
7104 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7106 req->flags = cpu_to_le32(flags);
7107 return hwrm_req_send(bp, req);
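/* FUNC_BACKING_STORE_CFG notes: when the firmware channel cannot take the
 * extended request length, the request is truncated to the legacy length so
 * older firmware still parses it, and only the sections named in "enables"
 * are filled in. The TQM rings are programmed through parallel arrays
 * (num_entries/pg_attr/pg_dir), with the enable bit shifted once per ring
 * starting from TQM_SP.
 */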
7110 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7111 struct bnxt_ctx_pg_info *ctx_pg)
7113 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7115 rmem->page_size = BNXT_PAGE_SIZE;
7116 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7117 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7118 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7119 if (rmem->depth >= 1)
7120 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7121 return bnxt_alloc_ring(bp, rmem);
7124 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7125 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7126 u8 depth, struct bnxt_mem_init *mem_init)
7128 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7134 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7135 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7136 ctx_pg->nr_pages = 0;
7139 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7143 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), GFP_KERNEL);
7145 if (!ctx_pg->ctx_pg_tbl)
7147 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7148 rmem->nr_pages = nr_tbls;
7149 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7152 for (i = 0; i < nr_tbls; i++) {
7153 struct bnxt_ctx_pg_info *pg_tbl;
7155 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7158 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7159 rmem = &pg_tbl->ring_mem;
7160 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7161 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7163 rmem->nr_pages = MAX_CTX_PAGES;
7164 rmem->mem_init = mem_init;
7165 if (i == (nr_tbls - 1)) {
7166 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7169 rmem->nr_pages = rem;
7171 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7176 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7177 if (rmem->nr_pages > 1 || depth) rmem->depth = 1;
7179 rmem->mem_init = mem_init;
7180 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
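/* Backing-store paging (sketch): nr_pages = DIV_ROUND_UP(mem_size,
 * BNXT_PAGE_SIZE). If that fits in one page table and depth <= 1, a single
 * level is used; otherwise a directory of page tables is built, each
 * covering MAX_CTX_PAGES data pages, with the last table holding the
 * remainder. For example, with 4 KB pages (where MAX_CTX_PAGES works out to
 * 512 entries) a 16 MB region needs 4096 data pages spread over 8 page
 * tables.
 */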
7185 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7186 struct bnxt_ctx_pg_info *ctx_pg)
7188 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7190 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7191 ctx_pg->ctx_pg_tbl) {
7192 int i, nr_tbls = rmem->nr_pages;
7194 for (i = 0; i < nr_tbls; i++) {
7195 struct bnxt_ctx_pg_info *pg_tbl;
7196 struct bnxt_ring_mem_info *rmem2;
7198 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7201 rmem2 = &pg_tbl->ring_mem;
7202 bnxt_free_ring(bp, rmem2);
7203 ctx_pg->ctx_pg_arr[i] = NULL;
7205 ctx_pg->ctx_pg_tbl[i] = NULL;
7207 kfree(ctx_pg->ctx_pg_tbl);
7208 ctx_pg->ctx_pg_tbl = NULL;
7210 bnxt_free_ring(bp, rmem);
7211 ctx_pg->nr_pages = 0;
7214 void bnxt_free_ctx_mem(struct bnxt *bp)
7216 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7222 if (ctx->tqm_mem[0]) {
7223 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7224 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7225 kfree(ctx->tqm_mem[0]);
7226 ctx->tqm_mem[0] = NULL;
7229 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7230 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7231 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7232 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7233 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7234 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7235 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7236 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7239 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7241 struct bnxt_ctx_pg_info *ctx_pg;
7242 struct bnxt_ctx_mem_info *ctx;
7243 struct bnxt_mem_init *init;
7244 u32 mem_size, ena, entries;
7245 u32 entries_sp, min;
7252 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7254 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7259 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7262 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7268 ctx_pg = &ctx->qp_mem;
7269 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7271 if (ctx->qp_entry_size) {
7272 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7273 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7274 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7279 ctx_pg = &ctx->srq_mem;
7280 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7281 if (ctx->srq_entry_size) {
7282 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7283 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7284 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7289 ctx_pg = &ctx->cq_mem;
7290 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7291 if (ctx->cq_entry_size) {
7292 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7293 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7294 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7299 ctx_pg = &ctx->vnic_mem;
7300 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7301 ctx->vnic_max_ring_table_entries;
7302 if (ctx->vnic_entry_size) {
7303 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7304 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7305 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7310 ctx_pg = &ctx->stat_mem;
7311 ctx_pg->entries = ctx->stat_max_entries;
7312 if (ctx->stat_entry_size) {
7313 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7314 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7315 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7321 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7324 ctx_pg = &ctx->mrav_mem;
7325 /* 128K extra is needed to accommodate static AH context
7326 * allocation by f/w. */
7328 num_mr = 1024 * 256;
7329 num_ah = 1024 * 128;
7330 ctx_pg->entries = num_mr + num_ah;
7331 if (ctx->mrav_entry_size) {
7332 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7333 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7334 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7338 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7339 if (ctx->mrav_num_entries_units)
7341 ctx_pg->entries = ((num_mr / ctx->mrav_num_entries_units) << 16) |
7342 (num_ah / ctx->mrav_num_entries_units);
7344 ctx_pg = &ctx->tim_mem;
7345 ctx_pg->entries = ctx->qp_mem.entries;
7346 if (ctx->tim_entry_size) {
7347 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7348 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7352 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7355 min = ctx->tqm_min_entries_per_ring;
7356 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7357 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7358 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7359 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7360 entries = roundup(entries, ctx->tqm_entries_multiple);
7361 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7362 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7363 ctx_pg = ctx->tqm_mem[i];
7364 ctx_pg->entries = i ? entries : entries_sp;
7365 if (ctx->tqm_entry_size) {
7366 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7367 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7372 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7374 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7375 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7377 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7381 ctx->flags |= BNXT_CTX_FLAG_INITED;
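/* Sizing summary for bnxt_alloc_ctx_mem(): QP/SRQ/CQ rings get extra
 * entries for RoCE when the device is RoCE-capable and not running in a
 * kdump kernel; MRAV memory is sized for 256K MRs plus 128K AHs (the AHs
 * being the firmware's static allocation noted above); TIM mirrors the QP
 * entry count; and each fast-path TQM ring is sized from the L2 and QP1
 * entry counts, rounded to tqm_entries_multiple and clamped to the min/max
 * the firmware reported, while the slow-path TQM ring also counts the VNIC
 * entries.
 */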
7385 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7387 struct hwrm_func_resource_qcaps_output *resp;
7388 struct hwrm_func_resource_qcaps_input *req;
7389 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7392 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7396 req->fid = cpu_to_le16(0xffff);
7397 resp = hwrm_req_hold(bp, req);
7398 rc = hwrm_req_send_silent(bp, req);
7400 goto hwrm_func_resc_qcaps_exit;
7402 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7404 goto hwrm_func_resc_qcaps_exit;
7406 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7407 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7408 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7409 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7410 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7411 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7412 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7413 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7414 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7415 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7416 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7417 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7418 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7419 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7420 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7421 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7423 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7424 u16 max_msix = le16_to_cpu(resp->max_msix);
7426 hw_resc->max_nqs = max_msix;
7427 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7431 struct bnxt_pf_info *pf = &bp->pf;
7433 pf->vf_resv_strategy =
7434 le16_to_cpu(resp->vf_reservation_strategy);
7435 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7436 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7438 hwrm_func_resc_qcaps_exit:
7439 hwrm_req_drop(bp, req);
7443 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7445 struct hwrm_port_mac_ptp_qcfg_output *resp;
7446 struct hwrm_port_mac_ptp_qcfg_input *req;
7447 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7452 if (bp->hwrm_spec_code < 0x10801) {
7457 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7461 req->port_id = cpu_to_le16(bp->pf.port_id);
7462 resp = hwrm_req_hold(bp, req);
7463 rc = hwrm_req_send(bp, req);
7467 flags = resp->flags;
7468 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7473 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7481 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7482 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7483 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7484 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7485 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7486 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7491 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7492 rc = bnxt_ptp_init(bp, phc_cfg);
7494 netdev_warn(bp->dev, "PTP initialization failed.\n");
7496 hwrm_req_drop(bp, req);
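/* __bnxt_hwrm_ptp_qcfg() allocates bp->ptp_cfg on first use, then picks the
 * pair of reference-clock registers: either the ones firmware advertises
 * for partial direct access, or the fixed P5 TIMESYNC_TS0 registers.
 * phc_cfg tells bnxt_ptp_init() whether the RTC was already configured by
 * firmware.
 */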
7507 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7509 struct hwrm_func_qcaps_output *resp;
7510 struct hwrm_func_qcaps_input *req;
7511 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7512 u32 flags, flags_ext;
7515 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7519 req->fid = cpu_to_le16(0xffff);
7520 resp = hwrm_req_hold(bp, req);
7521 rc = hwrm_req_send(bp, req);
7523 goto hwrm_func_qcaps_exit;
7525 flags = le32_to_cpu(resp->flags);
7526 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7527 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7528 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7529 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7530 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7531 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7532 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7533 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7534 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7535 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7536 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7537 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7538 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7539 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7540 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7541 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7542 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7543 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7545 flags_ext = le32_to_cpu(resp->flags_ext);
7546 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7547 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7548 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7549 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7550 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7551 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7552 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7553 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7554 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7555 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7557 bp->tx_push_thresh = 0;
7558 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7559 BNXT_FW_MAJ(bp) > 217)
7560 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7562 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7563 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7564 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7565 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7566 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7567 if (!hw_resc->max_hw_ring_grps)
7568 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7569 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7570 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7571 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7574 struct bnxt_pf_info *pf = &bp->pf;
7576 pf->fw_fid = le16_to_cpu(resp->fid);
7577 pf->port_id = le16_to_cpu(resp->port_id);
7578 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7579 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7580 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7581 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7582 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7583 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7584 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7585 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7586 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7587 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7588 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7589 bp->flags |= BNXT_FLAG_WOL_CAP;
7590 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7591 __bnxt_hwrm_ptp_qcfg(bp);
7598 #ifdef CONFIG_BNXT_SRIOV
7599 struct bnxt_vf_info *vf = &bp->vf;
7601 vf->fw_fid = le16_to_cpu(resp->fid);
7602 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7606 hwrm_func_qcaps_exit:
7607 hwrm_req_drop(bp, req);
7611 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7613 struct hwrm_dbg_qcaps_output *resp;
7614 struct hwrm_dbg_qcaps_input *req;
7618 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) return;
7621 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7625 req->fid = cpu_to_le16(0xffff);
7626 resp = hwrm_req_hold(bp, req);
7627 rc = hwrm_req_send(bp, req);
7629 goto hwrm_dbg_qcaps_exit;
7631 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7633 hwrm_dbg_qcaps_exit:
7634 hwrm_req_drop(bp, req);
7637 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7639 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7643 rc = __bnxt_hwrm_func_qcaps(bp);
7647 bnxt_hwrm_dbg_qcaps(bp);
7649 rc = bnxt_hwrm_queue_qportcfg(bp);
7651 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7654 if (bp->hwrm_spec_code >= 0x10803) {
7655 rc = bnxt_alloc_ctx_mem(bp);
7658 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7660 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7665 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7667 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7668 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7672 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) return 0;
7675 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7679 resp = hwrm_req_hold(bp, req);
7680 rc = hwrm_req_send(bp, req);
7682 goto hwrm_cfa_adv_qcaps_exit;
7684 flags = le32_to_cpu(resp->flags);
7686 if (flags & CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7687 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7689 hwrm_cfa_adv_qcaps_exit:
7690 hwrm_req_drop(bp, req);
7694 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7699 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7703 mutex_init(&bp->fw_health->lock);
7707 static int bnxt_alloc_fw_health(struct bnxt *bp)
7711 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7712 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0;
7715 rc = __bnxt_alloc_fw_health(bp);
7717 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7718 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7725 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7727 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7728 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7729 BNXT_FW_HEALTH_WIN_MAP_OFF);
7732 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7734 struct bnxt_fw_health *fw_health = bp->fw_health;
7740 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7741 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7742 fw_health->status_reliable = false;
7744 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7745 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7746 fw_health->resets_reliable = false;
7749 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7757 bp->fw_health->status_reliable = false;
7759 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7760 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7762 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7763 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7764 if (!bp->chip_num) {
7765 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7766 bp->chip_num = readl(bp->bar0 +
7767 BNXT_FW_HEALTH_WIN_BASE +
7768 BNXT_GRC_REG_CHIP_NUM);
7770 if (!BNXT_CHIP_P5(bp)) return;
7773 status_loc = BNXT_GRC_REG_STATUS_P5 |
7774 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7776 status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
7780 if (__bnxt_alloc_fw_health(bp)) {
7781 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7785 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7786 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7787 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7788 __bnxt_map_fw_health_reg(bp, status_loc);
7789 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7790 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7793 bp->fw_health->status_reliable = true;
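/* Firmware health register discovery (sketch): a GRC window is mapped at
 * HCOMM_STATUS_STRUCT_LOC and the hcomm_status signature is checked. If it
 * is absent, P5 chips fall back to a fixed BAR0 status register; otherwise
 * the status location comes from the hcomm_status structure. The register
 * is recorded (and its GRC window pre-mapped when it is a GRC address) so
 * that later health polling can mark status_reliable.
 */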
7796 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7798 struct bnxt_fw_health *fw_health = bp->fw_health;
7799 u32 reg_base = 0xffffffff;
7802 bp->fw_health->status_reliable = false;
7803 bp->fw_health->resets_reliable = false;
7804 /* Only pre-map the monitoring GRC registers using window 3 */
7805 for (i = 0; i < 4; i++) {
7806 u32 reg = fw_health->regs[i];
7808 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7810 if (reg_base == 0xffffffff)
7811 reg_base = reg & BNXT_GRC_BASE_MASK;
7812 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7814 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7816 bp->fw_health->status_reliable = true;
7817 bp->fw_health->resets_reliable = true;
7818 if (reg_base == 0xffffffff)
7821 __bnxt_map_fw_health_reg(bp, reg_base);
7825 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7830 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7831 bp->fw_health->status_reliable = true;
7832 bp->fw_health->resets_reliable = true;
7834 bnxt_try_map_fw_health_reg(bp);
7838 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7840 struct bnxt_fw_health *fw_health = bp->fw_health;
7841 struct hwrm_error_recovery_qcfg_output *resp;
7842 struct hwrm_error_recovery_qcfg_input *req;
7845 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0;
7848 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7852 resp = hwrm_req_hold(bp, req);
7853 rc = hwrm_req_send(bp, req);
7855 goto err_recovery_out;
7856 fw_health->flags = le32_to_cpu(resp->flags);
7857 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7858 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7860 goto err_recovery_out;
7862 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7863 fw_health->master_func_wait_dsecs =
7864 le32_to_cpu(resp->master_func_wait_period);
7865 fw_health->normal_func_wait_dsecs =
7866 le32_to_cpu(resp->normal_func_wait_period);
7867 fw_health->post_reset_wait_dsecs =
7868 le32_to_cpu(resp->master_func_wait_period_after_reset);
7869 fw_health->post_reset_max_wait_dsecs =
7870 le32_to_cpu(resp->max_bailout_time_after_reset);
7871 fw_health->regs[BNXT_FW_HEALTH_REG] =
7872 le32_to_cpu(resp->fw_health_status_reg);
7873 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7874 le32_to_cpu(resp->fw_heartbeat_reg);
7875 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7876 le32_to_cpu(resp->fw_reset_cnt_reg);
7877 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7878 le32_to_cpu(resp->reset_inprogress_reg);
7879 fw_health->fw_reset_inprog_reg_mask =
7880 le32_to_cpu(resp->reset_inprogress_reg_mask);
7881 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7882 if (fw_health->fw_reset_seq_cnt >= 16) {
7884 goto err_recovery_out;
7886 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7887 fw_health->fw_reset_seq_regs[i] =
7888 le32_to_cpu(resp->reset_reg[i]);
7889 fw_health->fw_reset_seq_vals[i] =
7890 le32_to_cpu(resp->reset_reg_val[i]);
7891 fw_health->fw_reset_seq_delay_msec[i] =
7892 resp->delay_after_reset[i];
7895 hwrm_req_drop(bp, req);
7897 rc = bnxt_map_fw_health_regs(bp);
7899 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
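/* ERROR_RECOVERY_QCFG values are in decisecond units (the *_dsecs fields
 * above), and the firmware reset sequence is bounded at 16 register writes,
 * each with its own post-write delay in milliseconds. Recovery driven by
 * the co-processor is only honoured when the KONG mailbox channel is
 * available.
 */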
7903 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7905 struct hwrm_func_reset_input *req;
7908 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7913 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7914 return hwrm_req_send(bp, req);
7917 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7919 struct hwrm_nvm_get_dev_info_output nvm_info;
7921 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7922 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7923 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7924 nvm_info.nvm_cfg_ver_upd);
7927 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7929 struct hwrm_queue_qportcfg_output *resp;
7930 struct hwrm_queue_qportcfg_input *req;
7935 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7939 resp = hwrm_req_hold(bp, req);
7940 rc = hwrm_req_send(bp, req);
7944 if (!resp->max_configurable_queues) {
7948 bp->max_tc = resp->max_configurable_queues;
7949 bp->max_lltc = resp->max_configurable_lossless_queues;
7950 if (bp->max_tc > BNXT_MAX_QUEUE)
7951 bp->max_tc = BNXT_MAX_QUEUE;
7953 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7954 qptr = &resp->queue_id0;
7955 for (i = 0, j = 0; i < bp->max_tc; i++) {
7956 bp->q_info[j].queue_id = *qptr;
7957 bp->q_ids[i] = *qptr++;
7958 bp->q_info[j].queue_profile = *qptr++;
7959 bp->tc_to_qidx[j] = j;
7960 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7961 (no_rdma && BNXT_PF(bp))) j++;
7964 bp->max_q = bp->max_tc;
7965 bp->max_tc = max_t(u8, j, 1);
7967 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) bp->max_tc = 1;
7970 if (bp->max_lltc > bp->max_tc)
7971 bp->max_lltc = bp->max_tc;
7974 hwrm_req_drop(bp, req);
7978 static int bnxt_hwrm_poll(struct bnxt *bp)
7980 struct hwrm_ver_get_input *req;
7983 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7987 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7988 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7989 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7991 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7992 rc = hwrm_req_send(bp, req);
7996 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7998 struct hwrm_ver_get_output *resp;
7999 struct hwrm_ver_get_input *req;
8000 u16 fw_maj, fw_min, fw_bld, fw_rsv;
8001 u32 dev_caps_cfg, hwrm_ver;
8004 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8008 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8009 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8010 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8011 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8012 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8014 resp = hwrm_req_hold(bp, req);
8015 rc = hwrm_req_send(bp, req);
8017 goto hwrm_ver_get_exit;
8019 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8021 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8022 resp->hwrm_intf_min_8b << 8 |
8023 resp->hwrm_intf_upd_8b;
8024 if (resp->hwrm_intf_maj_8b < 1) {
8025 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8026 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8027 resp->hwrm_intf_upd_8b);
8028 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8031 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8032 HWRM_VERSION_UPDATE;
8034 if (bp->hwrm_spec_code > hwrm_ver)
8035 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8036 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8037 HWRM_VERSION_UPDATE);
8039 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8040 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8041 resp->hwrm_intf_upd_8b);
8043 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8044 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8045 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8046 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8047 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8048 len = FW_VER_STR_LEN;
8050 fw_maj = resp->hwrm_fw_maj_8b;
8051 fw_min = resp->hwrm_fw_min_8b;
8052 fw_bld = resp->hwrm_fw_bld_8b;
8053 fw_rsv = resp->hwrm_fw_rsvd_8b;
8054 len = BC_HWRM_STR_LEN;
8056 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8057 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8060 if (strlen(resp->active_pkg_name)) {
8061 int fw_ver_len = strlen(bp->fw_ver_str);
8063 snprintf(bp->fw_ver_str + fw_ver_len,
8064 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8065 resp->active_pkg_name);
8066 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8069 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8070 if (!bp->hwrm_cmd_timeout)
8071 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8072 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8073 if (!bp->hwrm_cmd_max_timeout)
8074 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8075 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8076 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8077 bp->hwrm_cmd_max_timeout / 1000);
8079 if (resp->hwrm_intf_maj_8b >= 1) {
8080 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8081 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8083 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8084 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8086 bp->chip_num = le16_to_cpu(resp->chip_num);
8087 bp->chip_rev = resp->chip_rev;
8088 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && !resp->chip_metal)
8090 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8092 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8093 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8094 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8095 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8097 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8098 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8101 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8102 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8105 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8106 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8109 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8110 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8113 hwrm_req_drop(bp, req);
8117 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8119 struct hwrm_fw_set_time_input *req;
8121 time64_t now = ktime_get_real_seconds();
8124 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8125 bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP;
8128 time64_to_tm(now, 0, &tm);
8129 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8133 req->year = cpu_to_le16(1900 + tm.tm_year);
8134 req->month = 1 + tm.tm_mon;
8135 req->day = tm.tm_mday;
8136 req->hour = tm.tm_hour;
8137 req->minute = tm.tm_min;
8138 req->second = tm.tm_sec;
8139 return hwrm_req_send(bp, req);
8142 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8147 sw_tmp = (*sw & ~mask) | hw;
8148 if (hw < (*sw & mask)) sw_tmp += mask + 1;
8150 WRITE_ONCE(*sw, sw_tmp);
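/* Counter rollover example (illustrative): hardware counters are narrower
 * than 64 bits and "mask" gives their width. The software copy keeps full
 * 64-bit precision, so when the new hardware value is below the previous
 * low-order bits the counter must have wrapped and mask + 1 is added. With
 * a 48-bit mask, old low bits of 0xfffffffffff0 and a new hardware value of
 * 0x10, the software counter advances by 0x20 instead of going backwards.
 */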
8153 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8154 int count, bool ignore_zero)
8158 for (i = 0; i < count; i++) {
8159 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8161 if (ignore_zero && !hw)
8164 if (masks[i] == -1ULL) sw_stats[i] = hw; else
8167 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8171 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8173 if (!stats->hw_stats)
8176 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8177 stats->hw_masks, stats->len / 8, false);
8180 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8182 struct bnxt_stats_mem *ring0_stats;
8183 bool ignore_zero = false;
8186 /* Chip bug. Counter intermittently becomes 0. */
8187 if (bp->flags & BNXT_FLAG_CHIP_P5) ignore_zero = true;
8190 for (i = 0; i < bp->cp_nr_rings; i++) {
8191 struct bnxt_napi *bnapi = bp->bnapi[i];
8192 struct bnxt_cp_ring_info *cpr;
8193 struct bnxt_stats_mem *stats;
8195 cpr = &bnapi->cp_ring;
8196 stats = &cpr->stats;
8198 if (!i) ring0_stats = stats;
8199 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8200 ring0_stats->hw_masks,
8201 ring0_stats->len / 8, ignore_zero);
8203 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8204 struct bnxt_stats_mem *stats = &bp->port_stats;
8205 __le64 *hw_stats = stats->hw_stats;
8206 u64 *sw_stats = stats->sw_stats;
8207 u64 *masks = stats->hw_masks;
8210 cnt = sizeof(struct rx_port_stats) / 8;
8211 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8213 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8214 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8215 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8216 cnt = sizeof(struct tx_port_stats) / 8;
8217 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8219 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8220 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8221 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
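/* Accumulation notes: ring 0's hw_masks and length drive the per-ring loop
 * for every ring, P5 chips skip zero samples to work around the
 * counter-reads-as-zero bug noted above, and the port statistics block is
 * accumulated as an RX half followed by a TX half offset by
 * BNXT_TX_PORT_STATS_BYTE_OFFSET.
 */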
8225 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8227 struct hwrm_port_qstats_input *req;
8228 struct bnxt_pf_info *pf = &bp->pf;
8231 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0;
8234 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP;
8237 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8242 req->port_id = cpu_to_le16(pf->port_id);
8243 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8244 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8245 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8246 return hwrm_req_send(bp, req);
8249 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8251 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8252 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8253 struct hwrm_port_qstats_ext_output *resp_qs;
8254 struct hwrm_port_qstats_ext_input *req_qs;
8255 struct bnxt_pf_info *pf = &bp->pf;
8259 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) return 0;
8262 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP;
8265 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8269 req_qs->flags = flags;
8270 req_qs->port_id = cpu_to_le16(pf->port_id);
8271 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8272 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8273 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8274 sizeof(struct tx_port_stats_ext) : 0;
8275 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8276 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8277 resp_qs = hwrm_req_hold(bp, req_qs);
8278 rc = hwrm_req_send(bp, req_qs);
8280 bp->fw_rx_stats_ext_size =
8281 le16_to_cpu(resp_qs->rx_stat_size) / 8;
8282 if (BNXT_FW_MAJ(bp) < 220 &&
8283 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8284 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8286 bp->fw_tx_stats_ext_size = tx_stat_size ?
8287 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8289 bp->fw_rx_stats_ext_size = 0;
8290 bp->fw_tx_stats_ext_size = 0;
8292 hwrm_req_drop(bp, req_qs);
8297 if (bp->fw_tx_stats_ext_size <=
8298 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8299 bp->pri2cos_valid = 0;
8303 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8307 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8309 resp_qc = hwrm_req_hold(bp, req_qc);
8310 rc = hwrm_req_send(bp, req_qc);
8315 pri2cos = &resp_qc->pri0_cos_queue_id;
8316 for (i = 0; i < 8; i++) {
8317 u8 queue_id = pri2cos[i];
8320 /* Per port queue IDs start from 0, 10, 20, etc */
8321 queue_idx = queue_id % 10;
8322 if (queue_idx > BNXT_MAX_QUEUE) {
8323 bp->pri2cos_valid = false;
8324 hwrm_req_drop(bp, req_qc);
8327 for (j = 0; j < bp->max_q; j++) {
8328 if (bp->q_ids[j] == queue_id)
8329 bp->pri2cos_idx[i] = queue_idx;
8332 bp->pri2cos_valid = true;
8334 hwrm_req_drop(bp, req_qc);
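/* pri2cos mapping: per-port CoS queue IDs are spaced 10 apart (0, 10, 20,
 * ...), so queue_id % 10 recovers the per-port index. The mapping is only
 * queried when the extended TX statistics are large enough to include the
 * PFC duration counters, and it is invalidated if firmware reports an
 * out-of-range queue.
 */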
8339 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8341 bnxt_hwrm_tunnel_dst_port_free(bp,
8342 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8343 bnxt_hwrm_tunnel_dst_port_free(bp,
8344 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8347 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8353 if (set_tpa) tpa_flags = bp->flags & BNXT_FLAG_TPA;
8354 else if (BNXT_NO_FW_ACCESS(bp)) return 0;
8356 for (i = 0; i < bp->nr_vnics; i++) {
8357 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8359 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8367 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8371 for (i = 0; i < bp->nr_vnics; i++)
8372 bnxt_hwrm_vnic_set_rss(bp, i, false);
8375 static void bnxt_clear_vnic(struct bnxt *bp)
8380 bnxt_hwrm_clear_vnic_filter(bp);
8381 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8382 /* clear all RSS setting before free vnic ctx */
8383 bnxt_hwrm_clear_vnic_rss(bp);
8384 bnxt_hwrm_vnic_ctx_free(bp);
8386 /* before free the vnic, undo the vnic tpa settings */
8387 if (bp->flags & BNXT_FLAG_TPA)
8388 bnxt_set_tpa(bp, false);
8389 bnxt_hwrm_vnic_free(bp);
8390 if (bp->flags & BNXT_FLAG_CHIP_P5)
8391 bnxt_hwrm_vnic_ctx_free(bp);
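/* Teardown order in bnxt_clear_vnic(): L2 filters are cleared first, then
 * RSS state and contexts (before the VNICs on legacy chips, after them on
 * P5), with the TPA settings undone before the VNICs are freed, as the
 * comments above note.
 */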
8394 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8397 bnxt_clear_vnic(bp);
8398 bnxt_hwrm_ring_free(bp, close_path);
8399 bnxt_hwrm_ring_grp_free(bp);
8401 bnxt_hwrm_stat_ctx_free(bp);
8402 bnxt_hwrm_free_tunnel_ports(bp);
8406 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8408 struct hwrm_func_cfg_input *req;
8412 if (br_mode == BRIDGE_MODE_VEB)
8413 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8414 else if (br_mode == BRIDGE_MODE_VEPA)
8415 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8419 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8423 req->fid = cpu_to_le16(0xffff);
8424 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8425 req->evb_mode = evb_mode;
8426 return hwrm_req_send(bp, req);
8429 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8431 struct hwrm_func_cfg_input *req;
8434 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8437 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8441 req->fid = cpu_to_le16(0xffff);
8442 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8443 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8445 if (size == 128) req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8447 return hwrm_req_send(bp, req);
8450 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8452 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8455 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8458 /* allocate context for vnic */
8459 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8461 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8463 goto vnic_setup_err;
8465 bp->rsscos_nr_ctxs++;
8467 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8468 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8470 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8472 goto vnic_setup_err;
8474 bp->rsscos_nr_ctxs++;
8478 /* configure default vnic, ring grp */
8479 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8481 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8483 goto vnic_setup_err;
8486 /* Enable RSS hashing on vnic */
8487 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8489 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8491 goto vnic_setup_err;
8494 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8495 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8497 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8506 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8510 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8511 for (i = 0; i < nr_ctxs; i++) {
8512 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8514 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8518 bp->rsscos_nr_ctxs++;
8523 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8525 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8529 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8531 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8535 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8536 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8538 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8545 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8547 if (bp->flags & BNXT_FLAG_CHIP_P5)
8548 return __bnxt_setup_vnic_p5(bp, vnic_id);
8550 return __bnxt_setup_vnic(bp, vnic_id);
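/* VNIC setup paths: on P5 chips one RSS context is allocated per block of
 * RX rings (nr_ctxs from bnxt_get_nr_rss_ctxs()), RSS is programmed with
 * the P5 variant, and then the VNIC is configured. The legacy path
 * allocates the context(s) first (a second one only on Nitro A0),
 * configures the VNIC, then enables RSS. Both enable header-data split when
 * aggregation rings are in use.
 */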
8553 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8555 #ifdef CONFIG_RFS_ACCEL
8558 if (bp->flags & BNXT_FLAG_CHIP_P5)
8561 for (i = 0; i < bp->rx_nr_rings; i++) {
8562 struct bnxt_vnic_info *vnic;
8563 u16 vnic_id = i + 1, ring_id = i;
8566 if (vnic_id >= bp->nr_vnics) break;
8569 vnic = &bp->vnic_info[vnic_id];
8570 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8571 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8572 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8573 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8575 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8579 rc = bnxt_setup_vnic(bp, vnic_id);
8589 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8590 static bool bnxt_promisc_ok(struct bnxt *bp)
8592 #ifdef CONFIG_BNXT_SRIOV
8593 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8599 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8601 unsigned int rc = 0;
8603 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8605 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8610 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8612 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8619 static int bnxt_cfg_rx_mode(struct bnxt *);
8620 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8622 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8624 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8626 unsigned int rx_nr_rings = bp->rx_nr_rings;
8629 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8631 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8637 rc = bnxt_hwrm_ring_alloc(bp);
8639 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8643 rc = bnxt_hwrm_ring_grp_alloc(bp);
8645 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8649 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8652 /* default vnic 0 */
8653 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8655 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8659 rc = bnxt_setup_vnic(bp, 0);
8663 if (bp->flags & BNXT_FLAG_RFS) {
8664 rc = bnxt_alloc_rfs_vnics(bp);
8669 if (bp->flags & BNXT_FLAG_TPA) {
8670 rc = bnxt_set_tpa(bp, true);
8676 bnxt_update_vf_mac(bp);
8678 /* Filter for default vnic 0 */
8679 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8681 if (BNXT_VF(bp) && rc == -ENODEV)
8682 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8684 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8687 vnic->uc_filter_count = 1;
8690 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8693 if (bp->dev->flags & IFF_BROADCAST)
8694 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8696 if (bp->dev->flags & IFF_PROMISC)
8697 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8699 if (bp->dev->flags & IFF_ALLMULTI) {
8700 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8701 vnic->mc_list_count = 0;
8702 } else if (bp->dev->flags & IFF_MULTICAST) {
8705 bnxt_mc_list_updated(bp, &mask);
8706 vnic->rx_mask |= mask;
8709 rc = bnxt_cfg_rx_mode(bp);
8714 rc = bnxt_hwrm_set_coal(bp);
8716 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8719 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8720 rc = bnxt_setup_nitroa0_vnic(bp);
8722 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8727 bnxt_hwrm_func_qcfg(bp);
8728 netdev_update_features(bp->dev);
8734 bnxt_hwrm_resource_free(bp, 0, true);
8739 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8741 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8745 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8747 bnxt_init_cp_rings(bp);
8748 bnxt_init_rx_rings(bp);
8749 bnxt_init_tx_rings(bp);
8750 bnxt_init_ring_grps(bp, irq_re_init);
8751 bnxt_init_vnics(bp);
8753 return bnxt_init_chip(bp, irq_re_init);
8756 static int bnxt_set_real_num_queues(struct bnxt *bp)
8759 struct net_device *dev = bp->dev;
8761 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8762 bp->tx_nr_rings_xdp);
8766 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8770 #ifdef CONFIG_RFS_ACCEL
8771 if (bp->flags & BNXT_FLAG_RFS)
8772 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8778 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8781 int _rx = *rx, _tx = *tx;
8784 *rx = min_t(int, _rx, max);
8785 *tx = min_t(int, _tx, max);
8790 while (_rx + _tx > max) {
8791 if (_rx > _tx && _rx > 1)
8802 static void bnxt_setup_msix(struct bnxt *bp)
8804 const int len = sizeof(bp->irq_tbl[0].name);
8805 struct net_device *dev = bp->dev;
8808 tcs = netdev_get_num_tc(dev);
8812 for (i = 0; i < tcs; i++) {
8813 count = bp->tx_nr_rings_per_tc;
8815 netdev_set_tc_queue(dev, i, count, off);
8819 for (i = 0; i < bp->cp_nr_rings; i++) {
8820 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8823 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8825 else if (i < bp->rx_nr_rings)
8830 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8832 bp->irq_tbl[map_idx].handler = bnxt_msix;
8836 static void bnxt_setup_inta(struct bnxt *bp)
8838 const int len = sizeof(bp->irq_tbl[0].name);
8840 if (netdev_get_num_tc(bp->dev))
8841 netdev_reset_tc(bp->dev);
8843 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8845 bp->irq_tbl[0].handler = bnxt_inta;
8848 static int bnxt_init_int_mode(struct bnxt *bp);
8850 static int bnxt_setup_int_mode(struct bnxt *bp)
8855 rc = bnxt_init_int_mode(bp);
8856 if (rc || !bp->irq_tbl)
8857 return rc ?: -ENODEV;
8860 if (bp->flags & BNXT_FLAG_USING_MSIX)
8861 bnxt_setup_msix(bp);
8863 bnxt_setup_inta(bp);
8865 rc = bnxt_set_real_num_queues(bp);
8869 #ifdef CONFIG_RFS_ACCEL
8870 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8872 return bp->hw_resc.max_rsscos_ctxs;
8875 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8877 return bp->hw_resc.max_vnics;
8881 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8883 return bp->hw_resc.max_stat_ctxs;
8886 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8888 return bp->hw_resc.max_cp_rings;
8891 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8893 unsigned int cp = bp->hw_resc.max_cp_rings;
8895 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8896 cp -= bnxt_get_ulp_msix_num(bp);
8901 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8903 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8905 if (bp->flags & BNXT_FLAG_CHIP_P5)
8906 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8908 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8911 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8913 bp->hw_resc.max_irqs = max_irqs;
8916 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8920 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8921 if (bp->flags & BNXT_FLAG_CHIP_P5)
8922 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8924 return cp - bp->cp_nr_rings;
8927 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8929 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8932 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8934 int max_cp = bnxt_get_max_func_cp_rings(bp);
8935 int max_irq = bnxt_get_max_func_irqs(bp);
8936 int total_req = bp->cp_nr_rings + num;
8937 int max_idx, avail_msix;
8939 max_idx = bp->total_irqs;
8940 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8941 max_idx = min_t(int, bp->total_irqs, max_cp);
8942 avail_msix = max_idx - bp->cp_nr_rings;
8943 if (!BNXT_NEW_RM(bp) || avail_msix >= num) return avail_msix;
8946 if (max_irq < total_req) {
8947 num = max_irq - bp->cp_nr_rings;
8954 static int bnxt_get_num_msix(struct bnxt *bp)
8956 if (!BNXT_NEW_RM(bp))
8957 return bnxt_get_max_func_irqs(bp);
8959 return bnxt_nq_rings_in_use(bp);
8962 static int bnxt_init_msix(struct bnxt *bp)
8964 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8965 struct msix_entry *msix_ent;
8967 total_vecs = bnxt_get_num_msix(bp);
8968 max = bnxt_get_max_func_irqs(bp);
8969 if (total_vecs > max) total_vecs = max;
8975 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8979 for (i = 0; i < total_vecs; i++) {
8980 msix_ent[i].entry = i;
8981 msix_ent[i].vector = 0;
8984 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) min = 2;
8987 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8988 ulp_msix = bnxt_get_ulp_msix_num(bp);
8989 if (total_vecs < 0 || total_vecs < ulp_msix) {
8991 goto msix_setup_exit;
8994 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8996 for (i = 0; i < total_vecs; i++)
8997 bp->irq_tbl[i].vector = msix_ent[i].vector;
8999 bp->total_irqs = total_vecs;
9000 /* Trim rings based upon num of vectors allocated */
9001 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9002 total_vecs - ulp_msix, min == 1);
9004 goto msix_setup_exit;
9006 bp->cp_nr_rings = (min == 1) ?
9007 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9008 bp->tx_nr_rings + bp->rx_nr_rings;
9012 goto msix_setup_exit;
9014 bp->flags |= BNXT_FLAG_USING_MSIX;
9019 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9022 pci_disable_msix(bp->pdev);
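/* MSI-X sizing (sketch): the vector count starts from the NQs in use (or
 * the function's IRQ limit when the new resource manager is absent), is
 * capped at the function limit, and must at least cover the ULP vectors.
 * RX/TX rings are then trimmed to the vectors left after the ULP share, and
 * the completion-ring count is max(tx, rx) when rings are shared or
 * tx + rx otherwise.
 */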
9027 static int bnxt_init_inta(struct bnxt *bp)
9029 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9034 bp->rx_nr_rings = 1;
9035 bp->tx_nr_rings = 1;
9036 bp->cp_nr_rings = 1;
9037 bp->flags |= BNXT_FLAG_SHARED_RINGS;
9038 bp->irq_tbl[0].vector = bp->pdev->irq;
9042 static int bnxt_init_int_mode(struct bnxt *bp)
9046 if (bp->flags & BNXT_FLAG_MSIX_CAP)
9047 rc = bnxt_init_msix(bp);
9049 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9050 /* fallback to INTA */
9051 rc = bnxt_init_inta(bp);
9056 static void bnxt_clear_int_mode(struct bnxt *bp)
9058 if (bp->flags & BNXT_FLAG_USING_MSIX)
9059 pci_disable_msix(bp->pdev);
9063 bp->flags &= ~BNXT_FLAG_USING_MSIX;
9066 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9068 int tcs = netdev_get_num_tc(bp->dev);
9069 bool irq_cleared = false;
9072 if (!bnxt_need_reserve_rings(bp))
9075 if (irq_re_init && BNXT_NEW_RM(bp) &&
9076 bnxt_get_num_msix(bp) != bp->total_irqs) {
9077 bnxt_ulp_irq_stop(bp);
9078 bnxt_clear_int_mode(bp);
9081 rc = __bnxt_reserve_rings(bp);
9084 rc = bnxt_init_int_mode(bp);
9085 bnxt_ulp_irq_restart(bp, rc);
9088 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9091 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9092 netdev_err(bp->dev, "tx ring reservation failure\n");
9093 netdev_reset_tc(bp->dev);
9094 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
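/* Release all requested IRQs, clearing any CPU affinity hints and the
 * aRFS CPU reverse map before freeing the vectors.
 */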
9100 static void bnxt_free_irq(struct bnxt *bp)
9102 struct bnxt_irq *irq;
9105 #ifdef CONFIG_RFS_ACCEL
9106 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9107 bp->dev->rx_cpu_rmap = NULL;
9109 if (!bp->irq_tbl || !bp->bnapi)
9112 for (i = 0; i < bp->cp_nr_rings; i++) {
9113 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9115 irq = &bp->irq_tbl[map_idx];
9116 if (irq->requested) {
9117 if (irq->have_cpumask) {
9118 irq_set_affinity_hint(irq->vector, NULL);
9119 free_cpumask_var(irq->cpu_mask);
9120 irq->have_cpumask = 0;
9122 free_irq(irq->vector, bp->bnapi[i]);
9129 static int bnxt_request_irq(struct bnxt *bp)
9132 unsigned long flags = 0;
9133 #ifdef CONFIG_RFS_ACCEL
9134 struct cpu_rmap *rmap;
9137 rc = bnxt_setup_int_mode(bp);
9139 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9143 #ifdef CONFIG_RFS_ACCEL
9144 rmap = bp->dev->rx_cpu_rmap;
9146 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9147 flags = IRQF_SHARED;
9149 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9150 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9151 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9153 #ifdef CONFIG_RFS_ACCEL
9154 if (rmap && bp->bnapi[i]->rx_ring) {
9155 rc = irq_cpu_rmap_add(rmap, irq->vector);
9157 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9162 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9169 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9170 int numa_node = dev_to_node(&bp->pdev->dev);
9172 irq->have_cpumask = 1;
9173 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9175 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9177 netdev_warn(bp->dev,
9178 "Set affinity failed, IRQ = %d\n",
9187 static void bnxt_del_napi(struct bnxt *bp)
9194 for (i = 0; i < bp->cp_nr_rings; i++) {
9195 struct bnxt_napi *bnapi = bp->bnapi[i];
9197 __netif_napi_del(&bnapi->napi);
9199 /* Since we called __netif_napi_del(), we need
9200 * to respect an RCU grace period before freeing napi structures.
9205 static void bnxt_init_napi(struct bnxt *bp)
9208 unsigned int cp_nr_rings = bp->cp_nr_rings;
9209 struct bnxt_napi *bnapi;
9211 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9212 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9214 if (bp->flags & BNXT_FLAG_CHIP_P5)
9215 poll_fn = bnxt_poll_p5;
9216 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9218 for (i = 0; i < cp_nr_rings; i++) {
9219 bnapi = bp->bnapi[i];
9220 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9222 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9223 bnapi = bp->bnapi[cp_nr_rings];
9224 netif_napi_add(bp->dev, &bnapi->napi,
9225 bnxt_poll_nitroa0, 64);
9228 bnapi = bp->bnapi[0];
9229 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9233 static void bnxt_disable_napi(struct bnxt *bp)
9238 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9241 for (i = 0; i < bp->cp_nr_rings; i++) {
9242 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9244 napi_disable(&bp->bnapi[i]->napi);
9245 if (bp->bnapi[i]->rx_ring)
9246 cancel_work_sync(&cpr->dim.work);
9250 static void bnxt_enable_napi(struct bnxt *bp)
9254 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9255 for (i = 0; i < bp->cp_nr_rings; i++) {
9256 struct bnxt_napi *bnapi = bp->bnapi[i];
9257 struct bnxt_cp_ring_info *cpr;
9259 cpr = &bnapi->cp_ring;
9260 if (bnapi->in_reset)
9261 cpr->sw_stats.rx.rx_resets++;
9262 bnapi->in_reset = false;
9264 if (bnapi->rx_ring) {
9265 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9266 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9268 napi_enable(&bnapi->napi);
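/* Mark every TX ring as closing, drop the carrier first to avoid
 * spurious TX timeouts, then stop all TX queues.
 */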
9272 void bnxt_tx_disable(struct bnxt *bp)
9275 struct bnxt_tx_ring_info *txr;
9278 for (i = 0; i < bp->tx_nr_rings; i++) {
9279 txr = &bp->tx_ring[i];
9280 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9283 /* Make sure napi polls see @dev_state change */
9285 /* Drop carrier first to prevent TX timeout */
9286 netif_carrier_off(bp->dev);
9287 /* Stop all TX queues */
9288 netif_tx_disable(bp->dev);
9291 void bnxt_tx_enable(struct bnxt *bp)
9294 struct bnxt_tx_ring_info *txr;
9296 for (i = 0; i < bp->tx_nr_rings; i++) {
9297 txr = &bp->tx_ring[i];
9298 WRITE_ONCE(txr->dev_state, 0);
9300 /* Make sure napi polls see @dev_state change */
9302 netif_tx_wake_all_queues(bp->dev);
9303 if (BNXT_LINK_IS_UP(bp))
9304 netif_carrier_on(bp->dev);
9307 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9309 u8 active_fec = link_info->active_fec_sig_mode &
9310 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9312 switch (active_fec) {
9314 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9316 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9317 return "Clause 74 BaseR";
9318 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9319 return "Clause 91 RS(528,514)";
9320 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9321 return "Clause 91 RS544_1XN";
9322 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9323 return "Clause 91 RS(544,514)";
9324 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9325 return "Clause 91 RS272_1XN";
9326 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9327 return "Clause 91 RS(272,257)";
9331 void bnxt_report_link(struct bnxt *bp)
9333 if (BNXT_LINK_IS_UP(bp)) {
9334 const char *signal = "";
9335 const char *flow_ctrl;
9340 netif_carrier_on(bp->dev);
9341 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9342 if (speed == SPEED_UNKNOWN) {
9343 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9346 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9350 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9351 flow_ctrl = "ON - receive & transmit";
9352 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9353 flow_ctrl = "ON - transmit";
9354 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9355 flow_ctrl = "ON - receive";
9358 if (bp->link_info.phy_qcfg_resp.option_flags &
9359 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9360 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9361 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9363 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9366 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9373 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9374 speed, signal, duplex, flow_ctrl);
9375 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9376 netdev_info(bp->dev, "EEE is %s\n",
9377 bp->eee.eee_active ? "active" :
9379 fec = bp->link_info.fec_cfg;
9380 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9381 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9382 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9383 bnxt_report_fec(&bp->link_info));
9385 netif_carrier_off(bp->dev);
9386 netdev_err(bp->dev, "NIC Link is Down\n");
9390 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9392 if (!resp->supported_speeds_auto_mode &&
9393 !resp->supported_speeds_force_mode &&
9394 !resp->supported_pam4_speeds_auto_mode &&
9395 !resp->supported_pam4_speeds_force_mode)
9400 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9402 struct bnxt_link_info *link_info = &bp->link_info;
9403 struct hwrm_port_phy_qcaps_output *resp;
9404 struct hwrm_port_phy_qcaps_input *req;
9407 if (bp->hwrm_spec_code < 0x10201)
9410 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9414 resp = hwrm_req_hold(bp, req);
9415 rc = hwrm_req_send(bp, req);
9417 goto hwrm_phy_qcaps_exit;
9419 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9420 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9421 struct ethtool_eee *eee = &bp->eee;
9422 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9424 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9425 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9426 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9427 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9428 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9431 if (bp->hwrm_spec_code >= 0x10a01) {
9432 if (bnxt_phy_qcaps_no_speed(resp)) {
9433 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9434 netdev_warn(bp->dev, "Ethernet link disabled\n");
9435 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9436 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9437 netdev_info(bp->dev, "Ethernet link enabled\n");
9438 /* Phy re-enabled, reprobe the speeds */
9439 link_info->support_auto_speeds = 0;
9440 link_info->support_pam4_auto_speeds = 0;
9443 if (resp->supported_speeds_auto_mode)
9444 link_info->support_auto_speeds =
9445 le16_to_cpu(resp->supported_speeds_auto_mode);
9446 if (resp->supported_pam4_speeds_auto_mode)
9447 link_info->support_pam4_auto_speeds =
9448 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9450 bp->port_count = resp->port_cnt;
9452 hwrm_phy_qcaps_exit:
9453 hwrm_req_drop(bp, req);
9457 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9459 u16 diff = advertising ^ supported;
9461 return ((supported | diff) != supported);
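/* Query PORT_PHY_QCFG and refresh the cached link_info fields (speed,
 * duplex, pause, FEC, EEE, module status).  Optionally report link
 * state changes, and re-clamp the advertised speeds if firmware no
 * longer supports some of them.
 */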
9464 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9466 struct bnxt_link_info *link_info = &bp->link_info;
9467 struct hwrm_port_phy_qcfg_output *resp;
9468 struct hwrm_port_phy_qcfg_input *req;
9469 u8 link_state = link_info->link_state;
9470 bool support_changed = false;
9473 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9477 resp = hwrm_req_hold(bp, req);
9478 rc = hwrm_req_send(bp, req);
9480 hwrm_req_drop(bp, req);
9481 if (BNXT_VF(bp) && rc == -ENODEV) {
9482 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9488 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9489 link_info->phy_link_status = resp->link;
9490 link_info->duplex = resp->duplex_cfg;
9491 if (bp->hwrm_spec_code >= 0x10800)
9492 link_info->duplex = resp->duplex_state;
9493 link_info->pause = resp->pause;
9494 link_info->auto_mode = resp->auto_mode;
9495 link_info->auto_pause_setting = resp->auto_pause;
9496 link_info->lp_pause = resp->link_partner_adv_pause;
9497 link_info->force_pause_setting = resp->force_pause;
9498 link_info->duplex_setting = resp->duplex_cfg;
9499 if (link_info->phy_link_status == BNXT_LINK_LINK)
9500 link_info->link_speed = le16_to_cpu(resp->link_speed);
9502 link_info->link_speed = 0;
9503 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9504 link_info->force_pam4_link_speed =
9505 le16_to_cpu(resp->force_pam4_link_speed);
9506 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9507 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9508 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9509 link_info->auto_pam4_link_speeds =
9510 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9511 link_info->lp_auto_link_speeds =
9512 le16_to_cpu(resp->link_partner_adv_speeds);
9513 link_info->lp_auto_pam4_link_speeds =
9514 resp->link_partner_pam4_adv_speeds;
9515 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9516 link_info->phy_ver[0] = resp->phy_maj;
9517 link_info->phy_ver[1] = resp->phy_min;
9518 link_info->phy_ver[2] = resp->phy_bld;
9519 link_info->media_type = resp->media_type;
9520 link_info->phy_type = resp->phy_type;
9521 link_info->transceiver = resp->xcvr_pkg_type;
9522 link_info->phy_addr = resp->eee_config_phy_addr &
9523 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9524 link_info->module_status = resp->module_status;
9526 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9527 struct ethtool_eee *eee = &bp->eee;
9530 eee->eee_active = 0;
9531 if (resp->eee_config_phy_addr &
9532 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9533 eee->eee_active = 1;
9534 fw_speeds = le16_to_cpu(
9535 resp->link_partner_adv_eee_link_speed_mask);
9536 eee->lp_advertised =
9537 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9540 /* Pull initial EEE config */
9541 if (!chng_link_state) {
9542 if (resp->eee_config_phy_addr &
9543 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9544 eee->eee_enabled = 1;
9546 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9548 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9550 if (resp->eee_config_phy_addr &
9551 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9554 eee->tx_lpi_enabled = 1;
9555 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9556 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9557 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9562 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9563 if (bp->hwrm_spec_code >= 0x10504) {
9564 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9565 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9567 /* TODO: need to add more logic to report VF link */
9568 if (chng_link_state) {
9569 if (link_info->phy_link_status == BNXT_LINK_LINK)
9570 link_info->link_state = BNXT_LINK_STATE_UP;
9572 link_info->link_state = BNXT_LINK_STATE_DOWN;
9573 if (link_state != link_info->link_state)
9574 bnxt_report_link(bp);
9576 /* always report link down if not required to update link state */
9577 link_info->link_state = BNXT_LINK_STATE_DOWN;
9579 hwrm_req_drop(bp, req);
9581 if (!BNXT_PHY_CFG_ABLE(bp))
9584 /* Check if any advertised speeds are no longer supported. The caller
9585 * holds the link_lock mutex, so we can modify link_info settings.
9587 if (bnxt_support_dropped(link_info->advertising,
9588 link_info->support_auto_speeds)) {
9589 link_info->advertising = link_info->support_auto_speeds;
9590 support_changed = true;
9592 if (bnxt_support_dropped(link_info->advertising_pam4,
9593 link_info->support_pam4_auto_speeds)) {
9594 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9595 support_changed = true;
9597 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9598 bnxt_hwrm_set_link_setting(bp, true, false);
9602 static void bnxt_get_port_module_status(struct bnxt *bp)
9604 struct bnxt_link_info *link_info = &bp->link_info;
9605 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9608 if (bnxt_update_link(bp, true))
9611 module_status = link_info->module_status;
9612 switch (module_status) {
9613 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9614 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9615 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9616 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9618 if (bp->hwrm_spec_code >= 0x10201) {
9619 netdev_warn(bp->dev, "Module part number %s\n",
9620 resp->phy_vendor_partnumber);
9622 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9623 netdev_warn(bp->dev, "TX is disabled\n");
9624 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9625 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
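/* Fill in the pause/flow-control fields of a PORT_PHY_CFG request,
 * using autoneg pause when enabled and forced pause otherwise.
 */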
9630 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9632 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9633 if (bp->hwrm_spec_code >= 0x10201)
9635 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9636 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9637 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9638 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9639 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9641 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9643 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9644 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9645 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9646 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9648 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9649 if (bp->hwrm_spec_code >= 0x10201) {
9650 req->auto_pause = req->force_pause;
9651 req->enables |= cpu_to_le32(
9652 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9657 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9659 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9660 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9661 if (bp->link_info.advertising) {
9662 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9663 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9665 if (bp->link_info.advertising_pam4) {
9667 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9668 req->auto_link_pam4_speed_mask =
9669 cpu_to_le16(bp->link_info.advertising_pam4);
9671 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9672 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9674 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9675 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9676 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9677 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9679 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9683 /* tell chimp that the setting takes effect immediately */
9684 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9687 int bnxt_hwrm_set_pause(struct bnxt *bp)
9689 struct hwrm_port_phy_cfg_input *req;
9692 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9696 bnxt_hwrm_set_pause_common(bp, req);
9698 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9699 bp->link_info.force_link_chng)
9700 bnxt_hwrm_set_link_common(bp, req);
9702 rc = hwrm_req_send(bp, req);
9703 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9704 /* Since changing the pause setting doesn't trigger any link
9705 * change event, the driver needs to update the current pause
9706 * result upon successful return of the phy_cfg command.
9708 bp->link_info.pause =
9709 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9710 bp->link_info.auto_pause_setting = 0;
9711 if (!bp->link_info.force_link_chng)
9712 bnxt_report_link(bp);
9714 bp->link_info.force_link_chng = false;
9718 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9719 struct hwrm_port_phy_cfg_input *req)
9721 struct ethtool_eee *eee = &bp->eee;
9723 if (eee->eee_enabled) {
9725 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9727 if (eee->tx_lpi_enabled)
9728 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9730 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9732 req->flags |= cpu_to_le32(flags);
9733 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9734 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9735 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9737 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9741 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9743 struct hwrm_port_phy_cfg_input *req;
9746 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9751 bnxt_hwrm_set_pause_common(bp, req);
9753 bnxt_hwrm_set_link_common(bp, req);
9756 bnxt_hwrm_set_eee(bp, req);
9757 return hwrm_req_send(bp, req);
9760 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9762 struct hwrm_port_phy_cfg_input *req;
9765 if (!BNXT_SINGLE_PF(bp))
9768 if (pci_num_vf(bp->pdev) &&
9769 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9772 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9776 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9777 rc = hwrm_req_send(bp, req);
9779 mutex_lock(&bp->link_lock);
9780 /* The device is not obliged to bring the link down in certain
9781 * scenarios, even when forced. Setting the state unknown is
9782 * consistent with driver startup and will force the link state
9783 * to be reported during subsequent open based on PORT_PHY_QCFG.
9785 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9786 mutex_unlock(&bp->link_lock);
9791 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9793 #ifdef CONFIG_TEE_BNXT_FW
9794 int rc = tee_bnxt_fw_load();
9797 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9801 netdev_err(bp->dev, "OP-TEE not supported\n");
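/* Poll the firmware health/status register while firmware is booting
 * or recovering, and fall back to an OP-TEE assisted reset if firmware
 * crashed without a master function.
 */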
9806 static int bnxt_try_recover_fw(struct bnxt *bp)
9808 if (bp->fw_health && bp->fw_health->status_reliable) {
9813 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9814 rc = bnxt_hwrm_poll(bp);
9815 if (!BNXT_FW_IS_BOOTING(sts) &&
9816 !BNXT_FW_IS_RECOVERING(sts))
9819 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9821 if (!BNXT_FW_IS_HEALTHY(sts)) {
9823 "Firmware not responding, status: 0x%x\n",
9827 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9828 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9829 return bnxt_fw_reset_via_optee(bp);
9837 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
9839 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9842 if (!BNXT_NEW_RM(bp))
9843 return 0; /* no resource reservations required */
9845 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9847 netdev_err(bp->dev, "resc_qcaps failed\n");
9849 hw_resc->resv_cp_rings = 0;
9850 hw_resc->resv_stat_ctxs = 0;
9851 hw_resc->resv_irqs = 0;
9852 hw_resc->resv_tx_rings = 0;
9853 hw_resc->resv_rx_rings = 0;
9854 hw_resc->resv_hw_ring_grps = 0;
9855 hw_resc->resv_vnics = 0;
9857 bp->tx_nr_rings = 0;
9858 bp->rx_nr_rings = 0;
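/* Tell firmware via FUNC_DRV_IF_CHANGE that the interface is going up
 * or down, and detect from the response whether firmware was reset or
 * resources changed since the last open so the driver can reinitialize
 * accordingly.
 */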
9864 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9866 struct hwrm_func_drv_if_change_output *resp;
9867 struct hwrm_func_drv_if_change_input *req;
9868 bool fw_reset = !bp->irq_tbl;
9869 bool resc_reinit = false;
9873 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9876 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9881 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9882 resp = hwrm_req_hold(bp, req);
9884 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9885 while (retry < BNXT_FW_IF_RETRY) {
9886 rc = hwrm_req_send(bp, req);
9894 if (rc == -EAGAIN) {
9895 hwrm_req_drop(bp, req);
9898 flags = le32_to_cpu(resp->flags);
9900 rc = bnxt_try_recover_fw(bp);
9903 hwrm_req_drop(bp, req);
9908 bnxt_inv_fw_health_reg(bp);
9912 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9914 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9917 bnxt_remap_fw_health_regs(bp);
9919 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9920 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9921 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9924 if (resc_reinit || fw_reset) {
9926 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9927 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9929 bnxt_free_ctx_mem(bp);
9933 rc = bnxt_fw_init_one(bp);
9935 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9936 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9939 bnxt_clear_int_mode(bp);
9940 rc = bnxt_init_int_mode(bp);
9942 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9943 netdev_err(bp->dev, "init int mode failed\n");
9947 rc = bnxt_cancel_reservations(bp, fw_reset);
9952 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9954 struct hwrm_port_led_qcaps_output *resp;
9955 struct hwrm_port_led_qcaps_input *req;
9956 struct bnxt_pf_info *pf = &bp->pf;
9960 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9963 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9967 req->port_id = cpu_to_le16(pf->port_id);
9968 resp = hwrm_req_hold(bp, req);
9969 rc = hwrm_req_send(bp, req);
9971 hwrm_req_drop(bp, req);
9974 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9977 bp->num_leds = resp->num_leds;
9978 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9980 for (i = 0; i < bp->num_leds; i++) {
9981 struct bnxt_led_info *led = &bp->leds[i];
9982 __le16 caps = led->led_state_caps;
9984 if (!led->led_group_id ||
9985 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9991 hwrm_req_drop(bp, req);
9995 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9997 struct hwrm_wol_filter_alloc_output *resp;
9998 struct hwrm_wol_filter_alloc_input *req;
10001 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10005 req->port_id = cpu_to_le16(bp->pf.port_id);
10006 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10007 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10008 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10010 resp = hwrm_req_hold(bp, req);
10011 rc = hwrm_req_send(bp, req);
10013 bp->wol_filter_id = resp->wol_filter_id;
10014 hwrm_req_drop(bp, req);
10018 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10020 struct hwrm_wol_filter_free_input *req;
10023 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10027 req->port_id = cpu_to_le16(bp->pf.port_id);
10028 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10029 req->wol_filter_id = bp->wol_filter_id;
10031 return hwrm_req_send(bp, req);
10034 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10036 struct hwrm_wol_filter_qcfg_output *resp;
10037 struct hwrm_wol_filter_qcfg_input *req;
10038 u16 next_handle = 0;
10041 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10045 req->port_id = cpu_to_le16(bp->pf.port_id);
10046 req->handle = cpu_to_le16(handle);
10047 resp = hwrm_req_hold(bp, req);
10048 rc = hwrm_req_send(bp, req);
10050 next_handle = le16_to_cpu(resp->next_handle);
10051 if (next_handle != 0) {
10052 if (resp->wol_type ==
10053 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10055 bp->wol_filter_id = resp->wol_filter_id;
10059 hwrm_req_drop(bp, req);
10060 return next_handle;
10063 static void bnxt_get_wol_settings(struct bnxt *bp)
10068 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10072 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10073 } while (handle && handle != 0xffff);
10076 #ifdef CONFIG_BNXT_HWMON
10077 static ssize_t bnxt_show_temp(struct device *dev,
10078 struct device_attribute *devattr, char *buf)
10080 struct hwrm_temp_monitor_query_output *resp;
10081 struct hwrm_temp_monitor_query_input *req;
10082 struct bnxt *bp = dev_get_drvdata(dev);
10086 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10089 resp = hwrm_req_hold(bp, req);
10090 rc = hwrm_req_send(bp, req);
10092 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
10093 hwrm_req_drop(bp, req);
10098 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10100 static struct attribute *bnxt_attrs[] = {
10101 &sensor_dev_attr_temp1_input.dev_attr.attr,
10104 ATTRIBUTE_GROUPS(bnxt);
10106 static void bnxt_hwmon_close(struct bnxt *bp)
10108 if (bp->hwmon_dev) {
10109 hwmon_device_unregister(bp->hwmon_dev);
10110 bp->hwmon_dev = NULL;
10114 static void bnxt_hwmon_open(struct bnxt *bp)
10116 struct hwrm_temp_monitor_query_input *req;
10117 struct pci_dev *pdev = bp->pdev;
10120 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10122 rc = hwrm_req_send_silent(bp, req);
10123 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10124 bnxt_hwmon_close(bp);
10131 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10132 DRV_MODULE_NAME, bp,
10134 if (IS_ERR(bp->hwmon_dev)) {
10135 bp->hwmon_dev = NULL;
10136 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10140 static void bnxt_hwmon_close(struct bnxt *bp)
10144 static void bnxt_hwmon_open(struct bnxt *bp)
10149 static bool bnxt_eee_config_ok(struct bnxt *bp)
10151 struct ethtool_eee *eee = &bp->eee;
10152 struct bnxt_link_info *link_info = &bp->link_info;
10154 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10157 if (eee->eee_enabled) {
10159 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10161 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10162 eee->eee_enabled = 0;
10165 if (eee->advertised & ~advertising) {
10166 eee->advertised = advertising & eee->supported;
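/* Compare the current PHY configuration against the requested pause,
 * speed, and EEE settings and issue PORT_PHY_CFG only when something
 * actually needs to change.
 */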
10173 static int bnxt_update_phy_setting(struct bnxt *bp)
10176 bool update_link = false;
10177 bool update_pause = false;
10178 bool update_eee = false;
10179 struct bnxt_link_info *link_info = &bp->link_info;
10181 rc = bnxt_update_link(bp, true);
10183 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10187 if (!BNXT_SINGLE_PF(bp))
10190 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10191 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10192 link_info->req_flow_ctrl)
10193 update_pause = true;
10194 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10195 link_info->force_pause_setting != link_info->req_flow_ctrl)
10196 update_pause = true;
10197 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10198 if (BNXT_AUTO_MODE(link_info->auto_mode))
10199 update_link = true;
10200 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10201 link_info->req_link_speed != link_info->force_link_speed)
10202 update_link = true;
10203 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10204 link_info->req_link_speed != link_info->force_pam4_link_speed)
10205 update_link = true;
10206 if (link_info->req_duplex != link_info->duplex_setting)
10207 update_link = true;
10209 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10210 update_link = true;
10211 if (link_info->advertising != link_info->auto_link_speeds ||
10212 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10213 update_link = true;
10216 /* The last close may have shut down the link, so we need to call
10217 * PHY_CFG to bring it back up.
10219 if (!BNXT_LINK_IS_UP(bp))
10220 update_link = true;
10222 if (!bnxt_eee_config_ok(bp))
10226 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10227 else if (update_pause)
10228 rc = bnxt_hwrm_set_pause(bp);
10230 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10238 /* Common routine to pre-map certain register blocks to different GRC windows.
10239 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10240 * in the PF and 3 windows in the VF can be customized to map to different base registers.
10243 static void bnxt_preset_reg_win(struct bnxt *bp)
10246 /* CAG registers map to GRC window #4 */
10247 writel(BNXT_CAG_REG_BASE,
10248 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10252 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10254 static int bnxt_reinit_after_abort(struct bnxt *bp)
10258 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10261 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10264 rc = bnxt_fw_init_one(bp);
10266 bnxt_clear_int_mode(bp);
10267 rc = bnxt_init_int_mode(bp);
10269 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10270 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
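/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC, optionally update the PHY settings, and start the
 * periodic timer.
 */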
10276 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10280 bnxt_preset_reg_win(bp);
10281 netif_carrier_off(bp->dev);
10283 /* Reserve rings now if none were reserved at driver probe. */
10284 rc = bnxt_init_dflt_ring_mode(bp);
10286 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10290 rc = bnxt_reserve_rings(bp, irq_re_init);
10293 if ((bp->flags & BNXT_FLAG_RFS) &&
10294 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10295 /* disable RFS if falling back to INTA */
10296 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10297 bp->flags &= ~BNXT_FLAG_RFS;
10300 rc = bnxt_alloc_mem(bp, irq_re_init);
10302 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10303 goto open_err_free_mem;
10307 bnxt_init_napi(bp);
10308 rc = bnxt_request_irq(bp);
10310 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10315 rc = bnxt_init_nic(bp, irq_re_init);
10317 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10321 bnxt_enable_napi(bp);
10322 bnxt_debug_dev_init(bp);
10324 if (link_re_init) {
10325 mutex_lock(&bp->link_lock);
10326 rc = bnxt_update_phy_setting(bp);
10327 mutex_unlock(&bp->link_lock);
10329 netdev_warn(bp->dev, "failed to update phy settings\n");
10330 if (BNXT_SINGLE_PF(bp)) {
10331 bp->link_info.phy_retry = true;
10332 bp->link_info.phy_retry_expires =
10339 udp_tunnel_nic_reset_ntf(bp->dev);
10341 set_bit(BNXT_STATE_OPEN, &bp->state);
10342 bnxt_enable_int(bp);
10343 /* Enable TX queues */
10344 bnxt_tx_enable(bp);
10345 mod_timer(&bp->timer, jiffies + bp->current_interval);
10346 /* Poll link status and check for SFP+ module status */
10347 mutex_lock(&bp->link_lock);
10348 bnxt_get_port_module_status(bp);
10349 mutex_unlock(&bp->link_lock);
10351 /* VF-reps may need to be re-opened after the PF is re-opened */
10353 bnxt_vf_reps_open(bp);
10354 bnxt_ptp_init_rtc(bp, true);
10361 bnxt_free_skbs(bp);
10363 bnxt_free_mem(bp, true);
10367 /* rtnl_lock held */
10368 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10372 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10375 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10377 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10378 dev_close(bp->dev);
10383 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10384 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline self tests.
10387 int bnxt_half_open_nic(struct bnxt *bp)
10391 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10392 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10394 goto half_open_err;
10397 rc = bnxt_alloc_mem(bp, true);
10399 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10400 goto half_open_err;
10402 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10403 rc = bnxt_init_nic(bp, true);
10405 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10406 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10407 goto half_open_err;
10412 bnxt_free_skbs(bp);
10413 bnxt_free_mem(bp, true);
10414 dev_close(bp->dev);
10418 /* rtnl_lock held, this call can only be made after a previous successful
10419 * call to bnxt_half_open_nic().
10421 void bnxt_half_close_nic(struct bnxt *bp)
10423 bnxt_hwrm_resource_free(bp, false, true);
10424 bnxt_free_skbs(bp);
10425 bnxt_free_mem(bp, true);
10426 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10429 void bnxt_reenable_sriov(struct bnxt *bp)
10432 struct bnxt_pf_info *pf = &bp->pf;
10433 int n = pf->active_vfs;
10436 bnxt_cfg_hw_sriov(bp, &n, true);
10440 static int bnxt_open(struct net_device *dev)
10442 struct bnxt *bp = netdev_priv(dev);
10445 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10446 rc = bnxt_reinit_after_abort(bp);
10449 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10451 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10456 rc = bnxt_hwrm_if_change(bp, true);
10460 rc = __bnxt_open_nic(bp, true, true);
10462 bnxt_hwrm_if_change(bp, false);
10464 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10465 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10466 bnxt_ulp_start(bp, 0);
10467 bnxt_reenable_sriov(bp);
10470 bnxt_hwmon_open(bp);
10476 static bool bnxt_drv_busy(struct bnxt *bp)
10478 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10479 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10482 static void bnxt_get_ring_stats(struct bnxt *bp,
10483 struct rtnl_link_stats64 *stats);
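/* Core close path: stop TX, wait for in-flight stats/sp-task users,
 * shut down the rings, disable NAPI, and save ring statistics before
 * freeing resources.
 */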
10485 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10488 /* Close the VF-reps before closing PF */
10490 bnxt_vf_reps_close(bp);
10492 /* Change device state to avoid TX queue wake up's */
10493 bnxt_tx_disable(bp);
10495 clear_bit(BNXT_STATE_OPEN, &bp->state);
10496 smp_mb__after_atomic();
10497 while (bnxt_drv_busy(bp))
10500 /* Flush rings and disable interrupts */
10501 bnxt_shutdown_nic(bp, irq_re_init);
10503 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10505 bnxt_debug_dev_exit(bp);
10506 bnxt_disable_napi(bp);
10507 del_timer_sync(&bp->timer);
10508 bnxt_free_skbs(bp);
10510 /* Save ring stats before shutdown */
10511 if (bp->bnapi && irq_re_init)
10512 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10517 bnxt_free_mem(bp, irq_re_init);
10520 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10524 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10525 /* If we get here, it means firmware reset is in progress
10526 * while we are trying to close. We can safely proceed with
10527 * the close because we are holding rtnl_lock(). Some firmware
10528 * messages may fail as we proceed to close. We set the
10529 * ABORT_ERR flag here so that the FW reset thread will later
10530 * abort when it gets the rtnl_lock() and sees the flag.
10532 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10533 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10536 #ifdef CONFIG_BNXT_SRIOV
10537 if (bp->sriov_cfg) {
10538 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10540 BNXT_SRIOV_CFG_WAIT_TMO);
10542 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10545 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10549 static int bnxt_close(struct net_device *dev)
10551 struct bnxt *bp = netdev_priv(dev);
10553 bnxt_hwmon_close(bp);
10554 bnxt_close_nic(bp, true, true);
10555 bnxt_hwrm_shutdown_link(bp);
10556 bnxt_hwrm_if_change(bp, false);
10560 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10563 struct hwrm_port_phy_mdio_read_output *resp;
10564 struct hwrm_port_phy_mdio_read_input *req;
10567 if (bp->hwrm_spec_code < 0x10a00)
10568 return -EOPNOTSUPP;
10570 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10574 req->port_id = cpu_to_le16(bp->pf.port_id);
10575 req->phy_addr = phy_addr;
10576 req->reg_addr = cpu_to_le16(reg & 0x1f);
10577 if (mdio_phy_id_is_c45(phy_addr)) {
10578 req->cl45_mdio = 1;
10579 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10580 req->dev_addr = mdio_phy_id_devad(phy_addr);
10581 req->reg_addr = cpu_to_le16(reg);
10584 resp = hwrm_req_hold(bp, req);
10585 rc = hwrm_req_send(bp, req);
10587 *val = le16_to_cpu(resp->reg_data);
10588 hwrm_req_drop(bp, req);
10592 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10595 struct hwrm_port_phy_mdio_write_input *req;
10598 if (bp->hwrm_spec_code < 0x10a00)
10599 return -EOPNOTSUPP;
10601 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10605 req->port_id = cpu_to_le16(bp->pf.port_id);
10606 req->phy_addr = phy_addr;
10607 req->reg_addr = cpu_to_le16(reg & 0x1f);
10608 if (mdio_phy_id_is_c45(phy_addr)) {
10609 req->cl45_mdio = 1;
10610 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10611 req->dev_addr = mdio_phy_id_devad(phy_addr);
10612 req->reg_addr = cpu_to_le16(reg);
10614 req->reg_data = cpu_to_le16(val);
10616 return hwrm_req_send(bp, req);
10619 /* rtnl_lock held */
10620 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10622 struct mii_ioctl_data *mdio = if_mii(ifr);
10623 struct bnxt *bp = netdev_priv(dev);
10628 mdio->phy_id = bp->link_info.phy_addr;
10631 case SIOCGMIIREG: {
10632 u16 mii_regval = 0;
10634 if (!netif_running(dev))
10637 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10639 mdio->val_out = mii_regval;
10644 if (!netif_running(dev))
10647 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10650 case SIOCSHWTSTAMP:
10651 return bnxt_hwtstamp_set(dev, ifr);
10653 case SIOCGHWTSTAMP:
10654 return bnxt_hwtstamp_get(dev, ifr);
10660 return -EOPNOTSUPP;
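/* Aggregate the per-completion-ring software counters into the
 * rtnl_link_stats64 structure reported to the stack.
 */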
10663 static void bnxt_get_ring_stats(struct bnxt *bp,
10664 struct rtnl_link_stats64 *stats)
10668 for (i = 0; i < bp->cp_nr_rings; i++) {
10669 struct bnxt_napi *bnapi = bp->bnapi[i];
10670 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10671 u64 *sw = cpr->stats.sw_stats;
10673 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10674 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10675 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10677 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10678 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10679 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10681 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10682 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10683 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10685 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10686 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10687 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10689 stats->rx_missed_errors +=
10690 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10692 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10694 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10696 stats->rx_dropped +=
10697 cpr->sw_stats.rx.rx_netpoll_discards +
10698 cpr->sw_stats.rx.rx_oom_discards;
10702 static void bnxt_add_prev_stats(struct bnxt *bp,
10703 struct rtnl_link_stats64 *stats)
10705 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10707 stats->rx_packets += prev_stats->rx_packets;
10708 stats->tx_packets += prev_stats->tx_packets;
10709 stats->rx_bytes += prev_stats->rx_bytes;
10710 stats->tx_bytes += prev_stats->tx_bytes;
10711 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10712 stats->multicast += prev_stats->multicast;
10713 stats->rx_dropped += prev_stats->rx_dropped;
10714 stats->tx_dropped += prev_stats->tx_dropped;
10718 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10720 struct bnxt *bp = netdev_priv(dev);
10722 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10723 /* Make sure bnxt_close_nic() sees that we are reading stats before
10724 * we check the BNXT_STATE_OPEN flag.
10726 smp_mb__after_atomic();
10727 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10728 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10729 *stats = bp->net_stats_prev;
10733 bnxt_get_ring_stats(bp, stats);
10734 bnxt_add_prev_stats(bp, stats);
10736 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10737 u64 *rx = bp->port_stats.sw_stats;
10738 u64 *tx = bp->port_stats.sw_stats +
10739 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10741 stats->rx_crc_errors =
10742 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10743 stats->rx_frame_errors =
10744 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10745 stats->rx_length_errors =
10746 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10747 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10748 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10749 stats->rx_errors =
10750 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10751 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10752 stats->collisions =
10753 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10754 stats->tx_fifo_errors =
10755 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10756 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10758 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10761 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10763 struct net_device *dev = bp->dev;
10764 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10765 struct netdev_hw_addr *ha;
10768 bool update = false;
10771 netdev_for_each_mc_addr(ha, dev) {
10772 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10773 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10774 vnic->mc_list_count = 0;
10778 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10779 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10786 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10788 if (mc_count != vnic->mc_list_count) {
10789 vnic->mc_list_count = mc_count;
10795 static bool bnxt_uc_list_updated(struct bnxt *bp)
10797 struct net_device *dev = bp->dev;
10798 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10799 struct netdev_hw_addr *ha;
10802 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10805 netdev_for_each_uc_addr(ha, dev) {
10806 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10814 static void bnxt_set_rx_mode(struct net_device *dev)
10816 struct bnxt *bp = netdev_priv(dev);
10817 struct bnxt_vnic_info *vnic;
10818 bool mc_update = false;
10822 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10825 vnic = &bp->vnic_info[0];
10826 mask = vnic->rx_mask;
10827 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10828 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10829 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10830 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10832 if (dev->flags & IFF_PROMISC)
10833 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10835 uc_update = bnxt_uc_list_updated(bp);
10837 if (dev->flags & IFF_BROADCAST)
10838 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10839 if (dev->flags & IFF_ALLMULTI) {
10840 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10841 vnic->mc_list_count = 0;
10842 } else if (dev->flags & IFF_MULTICAST) {
10843 mc_update = bnxt_mc_list_updated(bp, &mask);
10846 if (mask != vnic->rx_mask || uc_update || mc_update) {
10847 vnic->rx_mask = mask;
10849 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10850 bnxt_queue_sp_work(bp);
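/* Re-program the unicast L2 filters and the VNIC RX mask to match the
 * netdev state; falls back to ALL_MCAST if multicast filter setup fails.
 */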
10854 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10856 struct net_device *dev = bp->dev;
10857 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10858 struct hwrm_cfa_l2_filter_free_input *req;
10859 struct netdev_hw_addr *ha;
10860 int i, off = 0, rc;
10863 netif_addr_lock_bh(dev);
10864 uc_update = bnxt_uc_list_updated(bp);
10865 netif_addr_unlock_bh(dev);
10870 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10873 hwrm_req_hold(bp, req);
10874 for (i = 1; i < vnic->uc_filter_count; i++) {
10875 req->l2_filter_id = vnic->fw_l2_filter_id[i];
10877 rc = hwrm_req_send(bp, req);
10879 hwrm_req_drop(bp, req);
10881 vnic->uc_filter_count = 1;
10883 netif_addr_lock_bh(dev);
10884 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10885 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10887 netdev_for_each_uc_addr(ha, dev) {
10888 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10890 vnic->uc_filter_count++;
10893 netif_addr_unlock_bh(dev);
10895 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10896 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10898 if (BNXT_VF(bp) && rc == -ENODEV) {
10899 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
10900 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
10902 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
10905 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10907 vnic->uc_filter_count = i;
10911 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
10912 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
10915 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10916 !bnxt_promisc_ok(bp))
10917 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10918 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10919 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
10920 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10922 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10923 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10924 vnic->mc_list_count = 0;
10925 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10928 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10934 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10936 #ifdef CONFIG_BNXT_SRIOV
10937 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10938 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10940 /* No minimum rings were provisioned by the PF. Don't
10941 * reserve rings by default when device is down.
10943 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10946 if (!netif_running(bp->dev))
10953 /* If the chip and firmware support RFS */
10954 static bool bnxt_rfs_supported(struct bnxt *bp)
10956 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10957 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10961 /* 212 firmware is broken for aRFS */
10962 if (BNXT_FW_MAJ(bp) == 212)
10964 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10966 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10971 /* If runtime conditions support RFS */
10972 static bool bnxt_rfs_capable(struct bnxt *bp)
10974 #ifdef CONFIG_RFS_ACCEL
10975 int vnics, max_vnics, max_rss_ctxs;
10977 if (bp->flags & BNXT_FLAG_CHIP_P5)
10978 return bnxt_rfs_supported(bp);
10979 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10982 vnics = 1 + bp->rx_nr_rings;
10983 max_vnics = bnxt_get_max_func_vnics(bp);
10984 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10986 /* RSS contexts not a limiting factor */
10987 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10988 max_rss_ctxs = max_vnics;
10989 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10990 if (bp->rx_nr_rings > 1)
10991 netdev_warn(bp->dev,
10992 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10993 min(max_rss_ctxs - 1, max_vnics - 1));
10997 if (!BNXT_NEW_RM(bp))
11000 if (vnics == bp->hw_resc.resv_vnics)
11003 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11004 if (vnics <= bp->hw_resc.resv_vnics)
11007 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11008 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
11015 static netdev_features_t bnxt_fix_features(struct net_device *dev,
11016 netdev_features_t features)
11018 struct bnxt *bp = netdev_priv(dev);
11019 netdev_features_t vlan_features;
11021 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11022 features &= ~NETIF_F_NTUPLE;
11024 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11025 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11027 if (!(features & NETIF_F_GRO))
11028 features &= ~NETIF_F_GRO_HW;
11030 if (features & NETIF_F_GRO_HW)
11031 features &= ~NETIF_F_LRO;
11033 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11034 * turned on or off together.
11036 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11037 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11038 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11039 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11040 else if (vlan_features)
11041 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
11043 #ifdef CONFIG_BNXT_SRIOV
11044 if (BNXT_VF(bp) && bp->vf.vlan)
11045 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11050 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11052 struct bnxt *bp = netdev_priv(dev);
11053 u32 flags = bp->flags;
11056 bool re_init = false;
11057 bool update_tpa = false;
11059 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
11060 if (features & NETIF_F_GRO_HW)
11061 flags |= BNXT_FLAG_GRO;
11062 else if (features & NETIF_F_LRO)
11063 flags |= BNXT_FLAG_LRO;
11065 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11066 flags &= ~BNXT_FLAG_TPA;
11068 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11069 flags |= BNXT_FLAG_STRIP_VLAN;
11071 if (features & NETIF_F_NTUPLE)
11072 flags |= BNXT_FLAG_RFS;
11074 changes = flags ^ bp->flags;
11075 if (changes & BNXT_FLAG_TPA) {
11077 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11078 (flags & BNXT_FLAG_TPA) == 0 ||
11079 (bp->flags & BNXT_FLAG_CHIP_P5))
11083 if (changes & ~BNXT_FLAG_TPA)
11086 if (flags != bp->flags) {
11087 u32 old_flags = bp->flags;
11089 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11092 bnxt_set_ring_params(bp);
11097 bnxt_close_nic(bp, false, false);
11100 bnxt_set_ring_params(bp);
11102 return bnxt_open_nic(bp, false, false);
11106 rc = bnxt_set_tpa(bp,
11107 (flags & BNXT_FLAG_TPA) ?
11110 bp->flags = old_flags;
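/* Validate the IPv6 extension headers of a TX packet so that checksum
 * and GSO offloads are only kept for header layouts the hardware can
 * handle; used by bnxt_features_check().
 */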
11116 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11119 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11124 /* Check that there are at most 2 IPv6 extension headers, no
11125 * fragment header, and each is <= 64 bytes.
11127 start = nw_off + sizeof(*ip6h);
11128 nexthdr = &ip6h->nexthdr;
11129 while (ipv6_ext_hdr(*nexthdr)) {
11130 struct ipv6_opt_hdr *hp;
11133 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11134 *nexthdr == NEXTHDR_FRAGMENT)
11136 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11137 skb_headlen(skb), NULL);
11140 if (*nexthdr == NEXTHDR_AUTH)
11141 hdrlen = ipv6_authlen(hp);
11143 hdrlen = ipv6_optlen(hp);
11147 nexthdr = &hp->nexthdr;
11152 /* Caller will check inner protocol */
11153 if (skb->encapsulation) {
11159 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11160 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11163 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11164 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11166 struct udphdr *uh = udp_hdr(skb);
11167 __be16 udp_port = uh->dest;
11169 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11171 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11172 struct ethhdr *eh = inner_eth_hdr(skb);
11174 switch (eh->h_proto) {
11175 case htons(ETH_P_IP):
11177 case htons(ETH_P_IPV6):
11178 return bnxt_exthdr_check(bp, skb,
11179 skb_inner_network_offset(skb),
11186 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11188 switch (l4_proto) {
11190 return bnxt_udp_tunl_check(bp, skb);
11193 case IPPROTO_GRE: {
11194 switch (skb->inner_protocol) {
11197 case htons(ETH_P_IP):
11199 case htons(ETH_P_IPV6):
11204 /* Check ext headers of inner ipv6 */
11205 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11211 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11212 struct net_device *dev,
11213 netdev_features_t features)
11215 struct bnxt *bp = netdev_priv(dev);
11218 features = vlan_features_check(skb, features);
11219 switch (vlan_get_protocol(skb)) {
11220 case htons(ETH_P_IP):
11221 if (!skb->encapsulation)
11223 l4_proto = &ip_hdr(skb)->protocol;
11224 if (bnxt_tunl_check(bp, skb, *l4_proto))
11227 case htons(ETH_P_IPV6):
11228 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11231 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11235 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11238 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11241 struct hwrm_dbg_read_direct_output *resp;
11242 struct hwrm_dbg_read_direct_input *req;
11243 __le32 *dbg_reg_buf;
11244 dma_addr_t mapping;
11247 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11251 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11253 if (!dbg_reg_buf) {
11255 goto dbg_rd_reg_exit;
11258 req->host_dest_addr = cpu_to_le64(mapping);
11260 resp = hwrm_req_hold(bp, req);
11261 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11262 req->read_len32 = cpu_to_le32(num_words);
11264 rc = hwrm_req_send(bp, req);
11265 if (rc || resp->error_code) {
11267 goto dbg_rd_reg_exit;
11269 for (i = 0; i < num_words; i++)
11270 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11273 hwrm_req_drop(bp, req);
11277 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11278 u32 ring_id, u32 *prod, u32 *cons)
11280 struct hwrm_dbg_ring_info_get_output *resp;
11281 struct hwrm_dbg_ring_info_get_input *req;
11284 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11288 req->ring_type = ring_type;
11289 req->fw_ring_id = cpu_to_le32(ring_id);
11290 resp = hwrm_req_hold(bp, req);
11291 rc = hwrm_req_send(bp, req);
11293 *prod = le32_to_cpu(resp->producer_index);
11294 *cons = le32_to_cpu(resp->consumer_index);
11296 hwrm_req_drop(bp, req);
11300 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11302 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11303 int i = bnapi->index;
11308 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11309 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11313 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11315 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11316 int i = bnapi->index;
11321 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11322 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11323 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11324 rxr->rx_sw_agg_prod);
11327 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11329 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11330 int i = bnapi->index;
11332 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11333 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11336 static void bnxt_dbg_dump_states(struct bnxt *bp)
11339 struct bnxt_napi *bnapi;
11341 for (i = 0; i < bp->cp_nr_rings; i++) {
11342 bnapi = bp->bnapi[i];
11343 if (netif_msg_drv(bp)) {
11344 bnxt_dump_tx_sw_state(bnapi);
11345 bnxt_dump_rx_sw_state(bnapi);
11346 bnxt_dump_cp_sw_state(bnapi);
11351 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11353 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11354 struct hwrm_ring_reset_input *req;
11355 struct bnxt_napi *bnapi = rxr->bnapi;
11356 struct bnxt_cp_ring_info *cpr;
11360 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11364 cpr = &bnapi->cp_ring;
11365 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11366 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11367 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11368 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11369 return hwrm_req_send_silent(bp, req);
11372 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11375 bnxt_dbg_dump_states(bp);
11376 if (netif_running(bp->dev)) {
11380 bnxt_close_nic(bp, false, false);
11381 bnxt_open_nic(bp, false, false);
11384 bnxt_close_nic(bp, true, false);
11385 rc = bnxt_open_nic(bp, true, false);
11386 bnxt_ulp_start(bp, rc);
11391 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11393 struct bnxt *bp = netdev_priv(dev);
11395 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11396 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11397 bnxt_queue_sp_work(bp);
11400 static void bnxt_fw_health_check(struct bnxt *bp)
11402 struct bnxt_fw_health *fw_health = bp->fw_health;
11405 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11408 /* Make sure it is enabled before checking the tmr_counter. */
11410 if (fw_health->tmr_counter) {
11411 fw_health->tmr_counter--;
11415 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11416 if (val == fw_health->last_fw_heartbeat) {
11417 fw_health->arrests++;
11421 fw_health->last_fw_heartbeat = val;
11423 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11424 if (val != fw_health->last_fw_reset_cnt) {
11425 fw_health->discoveries++;
11429 fw_health->tmr_counter = fw_health->tmr_multiplier;
11433 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11434 bnxt_queue_sp_work(bp);
11437 static void bnxt_timer(struct timer_list *t)
11439 struct bnxt *bp = from_timer(bp, t, timer);
11440 struct net_device *dev = bp->dev;
11442 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11445 if (atomic_read(&bp->intr_sem) != 0)
11446 goto bnxt_restart_timer;
11448 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11449 bnxt_fw_health_check(bp);
11451 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
11452 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11453 bnxt_queue_sp_work(bp);
11456 if (bnxt_tc_flower_enabled(bp)) {
11457 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11458 bnxt_queue_sp_work(bp);
11461 #ifdef CONFIG_RFS_ACCEL
11462 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11463 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11464 bnxt_queue_sp_work(bp);
11466 #endif /*CONFIG_RFS_ACCEL*/
11468 if (bp->link_info.phy_retry) {
11469 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11470 bp->link_info.phy_retry = false;
11471 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11473 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11474 bnxt_queue_sp_work(bp);
11478 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
11479 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11480 bnxt_queue_sp_work(bp);
11483 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11484 netif_carrier_ok(dev)) {
11485 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11486 bnxt_queue_sp_work(bp);
11488 bnxt_restart_timer:
11489 mod_timer(&bp->timer, jiffies + bp->current_interval);
11492 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11494 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11495 * set. If the device is being closed, bnxt_close() may be holding
11496 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11497 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
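 * bnxt_rtnl_unlock_sp() below re-sets the bit once the rtnl section is
 * done.
 */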
11499 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11503 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11505 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11509 /* Only called from bnxt_sp_task() */
11510 static void bnxt_reset(struct bnxt *bp, bool silent)
11512 bnxt_rtnl_lock_sp(bp);
11513 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11514 bnxt_reset_task(bp, silent);
11515 bnxt_rtnl_unlock_sp(bp);
11518 /* Only called from bnxt_sp_task() */
11519 static void bnxt_rx_ring_reset(struct bnxt *bp)
11523 bnxt_rtnl_lock_sp(bp);
11524 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11525 bnxt_rtnl_unlock_sp(bp);
11528 /* Disable and flush TPA before resetting the RX ring */
11529 if (bp->flags & BNXT_FLAG_TPA)
11530 bnxt_set_tpa(bp, false);
11531 for (i = 0; i < bp->rx_nr_rings; i++) {
11532 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11533 struct bnxt_cp_ring_info *cpr;
11536 if (!rxr->bnapi->in_reset)
11539 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11541 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11542 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11544 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11546 bnxt_reset_task(bp, true);
11549 bnxt_free_one_rx_ring_skbs(bp, i);
11551 rxr->rx_agg_prod = 0;
11552 rxr->rx_sw_agg_prod = 0;
11553 rxr->rx_next_cons = 0;
11554 rxr->bnapi->in_reset = false;
11555 bnxt_alloc_one_rx_ring(bp, i);
11556 cpr = &rxr->bnapi->cp_ring;
11557 cpr->sw_stats.rx.rx_resets++;
11558 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11559 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11560 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11562 if (bp->flags & BNXT_FLAG_TPA)
11563 bnxt_set_tpa(bp, true);
11564 bnxt_rtnl_unlock_sp(bp);
11567 static void bnxt_fw_reset_close(struct bnxt *bp)
11570 /* When firmware is in fatal state, quiesce device and disable
11571 * bus master to prevent any potential bad DMAs before freeing
 * kernel memory.
 */
11574 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11577 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11579 bp->fw_reset_min_dsecs = 0;
11580 bnxt_tx_disable(bp);
11581 bnxt_disable_napi(bp);
11582 bnxt_disable_int_sync(bp);
11584 bnxt_clear_int_mode(bp);
11585 pci_disable_device(bp->pdev);
11587 __bnxt_close_nic(bp, true, false);
11588 bnxt_vf_reps_free(bp);
11589 bnxt_clear_int_mode(bp);
11590 bnxt_hwrm_func_drv_unrgtr(bp);
11591 if (pci_is_enabled(bp->pdev))
11592 pci_disable_device(bp->pdev);
11593 bnxt_free_ctx_mem(bp);
11598 static bool is_bnxt_fw_ok(struct bnxt *bp)
11600 struct bnxt_fw_health *fw_health = bp->fw_health;
11601 bool no_heartbeat = false, has_reset = false;
11604 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11605 if (val == fw_health->last_fw_heartbeat)
11606 no_heartbeat = true;
11608 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11609 if (val != fw_health->last_fw_reset_cnt)
11612 if (!no_heartbeat && has_reset)
11618 /* rtnl_lock is acquired before calling this function */
11619 static void bnxt_force_fw_reset(struct bnxt *bp)
11621 struct bnxt_fw_health *fw_health = bp->fw_health;
11622 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11625 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11626 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11630 spin_lock_bh(&ptp->ptp_lock);
11631 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11632 spin_unlock_bh(&ptp->ptp_lock);
11634 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11636 bnxt_fw_reset_close(bp);
11637 wait_dsecs = fw_health->master_func_wait_dsecs;
11638 if (fw_health->primary) {
11639 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11641 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11643 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11644 wait_dsecs = fw_health->normal_func_wait_dsecs;
11645 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11648 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11649 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11650 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11653 void bnxt_fw_exception(struct bnxt *bp)
11655 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11656 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11657 bnxt_rtnl_lock_sp(bp);
11658 bnxt_force_fw_reset(bp);
11659 bnxt_rtnl_unlock_sp(bp);
11662 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
 * 0 if none, or a negative error code if the query fails.
 */
11665 static int bnxt_get_registered_vfs(struct bnxt *bp)
11667 #ifdef CONFIG_BNXT_SRIOV
11673 rc = bnxt_hwrm_func_qcfg(bp);
11675 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11678 if (bp->pf.registered_vfs)
11679 return bp->pf.registered_vfs;
11686 void bnxt_fw_reset(struct bnxt *bp)
11688 bnxt_rtnl_lock_sp(bp);
11689 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11690 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11691 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11695 spin_lock_bh(&ptp->ptp_lock);
11696 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11697 spin_unlock_bh(&ptp->ptp_lock);
11699 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11701 if (bp->pf.active_vfs &&
11702 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11703 n = bnxt_get_registered_vfs(bp);
11705 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11707 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11708 dev_close(bp->dev);
11709 goto fw_reset_exit;
11710 } else if (n > 0) {
11711 u16 vf_tmo_dsecs = n * 10;
11713 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11714 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11715 bp->fw_reset_state =
11716 BNXT_FW_RESET_STATE_POLL_VF;
11717 bnxt_queue_fw_reset_work(bp, HZ / 10);
11718 goto fw_reset_exit;
11720 bnxt_fw_reset_close(bp);
11721 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11722 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11725 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11726 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11728 bnxt_queue_fw_reset_work(bp, tmo);
11731 bnxt_rtnl_unlock_sp(bp);
11734 static void bnxt_chk_missed_irq(struct bnxt *bp)
11738 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11741 for (i = 0; i < bp->cp_nr_rings; i++) {
11742 struct bnxt_napi *bnapi = bp->bnapi[i];
11743 struct bnxt_cp_ring_info *cpr;
11750 cpr = &bnapi->cp_ring;
11751 for (j = 0; j < 2; j++) {
11752 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11755 if (!cpr2 || cpr2->has_more_work ||
11756 !bnxt_has_work(bp, cpr2))
11759 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11760 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11763 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11764 bnxt_dbg_hwrm_ring_info_get(bp,
11765 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11766 fw_ring_id, &val[0], &val[1]);
11767 cpr->sw_stats.cmn.missed_irqs++;
11772 static void bnxt_cfg_ntp_filters(struct bnxt *);
11774 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11776 struct bnxt_link_info *link_info = &bp->link_info;
11778 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11779 link_info->autoneg = BNXT_AUTONEG_SPEED;
11780 if (bp->hwrm_spec_code >= 0x10201) {
11781 if (link_info->auto_pause_setting &
11782 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11783 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11785 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11787 link_info->advertising = link_info->auto_link_speeds;
11788 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11790 link_info->req_link_speed = link_info->force_link_speed;
11791 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11792 if (link_info->force_pam4_link_speed) {
11793 link_info->req_link_speed =
11794 link_info->force_pam4_link_speed;
11795 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11797 link_info->req_duplex = link_info->duplex_setting;
11799 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11800 link_info->req_flow_ctrl =
11801 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11803 link_info->req_flow_ctrl = link_info->force_pause_setting;
11806 static void bnxt_fw_echo_reply(struct bnxt *bp)
11808 struct bnxt_fw_health *fw_health = bp->fw_health;
11809 struct hwrm_func_echo_response_input *req;
11812 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11815 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11816 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11817 hwrm_req_send(bp, req);
11820 static void bnxt_sp_task(struct work_struct *work)
11822 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11824 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11825 smp_mb__after_atomic();
11826 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11827 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11831 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11832 bnxt_cfg_rx_mode(bp);
11834 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11835 bnxt_cfg_ntp_filters(bp);
11836 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11837 bnxt_hwrm_exec_fwd_req(bp);
11838 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11839 bnxt_hwrm_port_qstats(bp, 0);
11840 bnxt_hwrm_port_qstats_ext(bp, 0);
11841 bnxt_accumulate_all_stats(bp);
11844 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11847 mutex_lock(&bp->link_lock);
11848 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11850 bnxt_hwrm_phy_qcaps(bp);
11852 rc = bnxt_update_link(bp, true);
11854 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11857 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11859 bnxt_init_ethtool_link_settings(bp);
11860 mutex_unlock(&bp->link_lock);
11862 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11865 mutex_lock(&bp->link_lock);
11866 rc = bnxt_update_phy_setting(bp);
11867 mutex_unlock(&bp->link_lock);
11869 netdev_warn(bp->dev, "update phy settings retry failed\n");
11871 bp->link_info.phy_retry = false;
11872 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11875 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11876 mutex_lock(&bp->link_lock);
11877 bnxt_get_port_module_status(bp);
11878 mutex_unlock(&bp->link_lock);
11881 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11882 bnxt_tc_flow_stats_work(bp);
11884 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11885 bnxt_chk_missed_irq(bp);
11887 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11888 bnxt_fw_echo_reply(bp);
11890 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11891 * must be the last functions to be called before exiting.
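 * bnxt_reset() and bnxt_rx_ring_reset() do this via bnxt_rtnl_lock_sp(),
 * which clears the flag before taking rtnl_lock (see above).
 */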
11893 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11894 bnxt_reset(bp, false);
11896 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11897 bnxt_reset(bp, true);
11899 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11900 bnxt_rx_ring_reset(bp);
11902 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
11903 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
11904 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
11905 bnxt_devlink_health_fw_report(bp);
11910 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11911 if (!is_bnxt_fw_ok(bp))
11912 bnxt_devlink_health_fw_report(bp);
11915 smp_mb__before_atomic();
11916 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11919 /* Under rtnl_lock */
11920 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11923 int max_rx, max_tx, tx_sets = 1;
11924 int tx_rings_needed, stats;
11931 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11938 tx_rings_needed = tx * tx_sets + tx_xdp;
11939 if (max_tx < tx_rings_needed)
11943 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11946 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11948 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
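	/* One shared completion ring covers a TX/RX pair (hence the max of
	 * the two counts above); otherwise each TX and RX ring needs its own.
	 */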
11950 if (BNXT_NEW_RM(bp)) {
11951 cp += bnxt_get_ulp_msix_num(bp);
11952 stats += bnxt_get_ulp_stat_ctxs(bp);
11954 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11958 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11961 pci_iounmap(pdev, bp->bar2);
11966 pci_iounmap(pdev, bp->bar1);
11971 pci_iounmap(pdev, bp->bar0);
11976 static void bnxt_cleanup_pci(struct bnxt *bp)
11978 bnxt_unmap_bars(bp, bp->pdev);
11979 pci_release_regions(bp->pdev);
11980 if (pci_is_enabled(bp->pdev))
11981 pci_disable_device(bp->pdev);
11984 static void bnxt_init_dflt_coal(struct bnxt *bp)
11986 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
11987 struct bnxt_coal *coal;
11990 if (coal_cap->cmpl_params &
11991 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
11992 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
11994 /* Tick values in microseconds.
11995 * 1 coal_buf x bufs_per_record = 1 completion record.
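 * e.g. with bufs_per_record = 2, the RX default of coal_bufs = 30 below
 * corresponds to roughly 15 completion records.
 */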
11997 coal = &bp->rx_coal;
11998 coal->coal_ticks = 10;
11999 coal->coal_bufs = 30;
12000 coal->coal_ticks_irq = 1;
12001 coal->coal_bufs_irq = 2;
12002 coal->idle_thresh = 50;
12003 coal->bufs_per_record = 2;
12004 coal->budget = 64; /* NAPI budget */
12005 coal->flags = flags;
12007 coal = &bp->tx_coal;
12008 coal->coal_ticks = 28;
12009 coal->coal_bufs = 30;
12010 coal->coal_ticks_irq = 2;
12011 coal->coal_bufs_irq = 2;
12012 coal->bufs_per_record = 1;
12013 coal->flags = flags;
12015 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12018 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12023 rc = bnxt_hwrm_ver_get(bp);
12024 bnxt_try_map_fw_health_reg(bp);
12026 rc = bnxt_try_recover_fw(bp);
12029 rc = bnxt_hwrm_ver_get(bp);
12034 bnxt_nvm_cfg_ver_get(bp);
12036 rc = bnxt_hwrm_func_reset(bp);
12040 bnxt_hwrm_fw_set_time(bp);
12044 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12048 /* Get the MAX capabilities for this function */
12049 rc = bnxt_hwrm_func_qcaps(bp);
12051 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12056 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12058 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12061 if (bnxt_alloc_fw_health(bp)) {
12062 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12064 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12066 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12070 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12074 bnxt_hwrm_func_qcfg(bp);
12075 bnxt_hwrm_vnic_qcaps(bp);
12076 bnxt_hwrm_port_led_qcaps(bp);
12077 bnxt_ethtool_init(bp);
12082 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12084 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12085 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12086 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12087 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12088 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
12089 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12090 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12091 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12092 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12096 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12098 struct net_device *dev = bp->dev;
12100 dev->hw_features &= ~NETIF_F_NTUPLE;
12101 dev->features &= ~NETIF_F_NTUPLE;
12102 bp->flags &= ~BNXT_FLAG_RFS;
12103 if (bnxt_rfs_supported(bp)) {
12104 dev->hw_features |= NETIF_F_NTUPLE;
12105 if (bnxt_rfs_capable(bp)) {
12106 bp->flags |= BNXT_FLAG_RFS;
12107 dev->features |= NETIF_F_NTUPLE;
12112 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12114 struct pci_dev *pdev = bp->pdev;
12116 bnxt_set_dflt_rss_hash_type(bp);
12117 bnxt_set_dflt_rfs(bp);
12119 bnxt_get_wol_settings(bp);
12120 if (bp->flags & BNXT_FLAG_WOL_CAP)
12121 device_set_wakeup_enable(&pdev->dev, bp->wol);
12123 device_set_wakeup_capable(&pdev->dev, false);
12125 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12126 bnxt_hwrm_coal_params_qcaps(bp);
12129 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12131 int bnxt_fw_init_one(struct bnxt *bp)
12135 rc = bnxt_fw_init_one_p1(bp);
12137 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12140 rc = bnxt_fw_init_one_p2(bp);
12142 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12145 rc = bnxt_probe_phy(bp, false);
12148 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12152 bnxt_fw_init_one_p3(bp);
12156 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12158 struct bnxt_fw_health *fw_health = bp->fw_health;
12159 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12160 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12161 u32 reg_type, reg_off, delay_msecs;
12163 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12164 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12165 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12166 switch (reg_type) {
12167 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12168 pci_write_config_dword(bp->pdev, reg_off, val);
12170 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12171 writel(reg_off & BNXT_GRC_BASE_MASK,
12172 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12173 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
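		/* The GRC case falls through: the register is now reachable
		 * through the window just programmed, at BAR0 offset 0x2000.
		 */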
12175 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12176 writel(val, bp->bar0 + reg_off);
12178 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12179 writel(val, bp->bar1 + reg_off);
12183 pci_read_config_dword(bp->pdev, 0, &val);
12184 msleep(delay_msecs);
12188 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12190 struct hwrm_func_qcfg_output *resp;
12191 struct hwrm_func_qcfg_input *req;
12192 bool result = true; /* firmware will enforce if unknown */
12194 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12197 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12200 req->fid = cpu_to_le16(0xffff);
12201 resp = hwrm_req_hold(bp, req);
12202 if (!hwrm_req_send(bp, req))
12203 result = !!(le16_to_cpu(resp->flags) &
12204 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12205 hwrm_req_drop(bp, req);
12209 static void bnxt_reset_all(struct bnxt *bp)
12211 struct bnxt_fw_health *fw_health = bp->fw_health;
12214 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12215 bnxt_fw_reset_via_optee(bp);
12216 bp->fw_reset_timestamp = jiffies;
12220 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12221 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12222 bnxt_fw_reset_writel(bp, i);
12223 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12224 struct hwrm_fw_reset_input *req;
12226 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12228 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12229 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12230 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12231 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12232 rc = hwrm_req_send(bp, req);
12235 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12237 bp->fw_reset_timestamp = jiffies;
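/* The fw_reset_*_dsecs values are in units of 100 ms (deciseconds);
 * multiplying by HZ / 10 converts them to jiffies.
 */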
12240 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12242 return time_after(jiffies, bp->fw_reset_timestamp +
12243 (bp->fw_reset_max_dsecs * HZ / 10));
12246 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12248 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12249 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12250 bnxt_ulp_start(bp, rc);
12251 bnxt_dl_health_fw_status_update(bp, false);
12253 bp->fw_reset_state = 0;
12254 dev_close(bp->dev);
12257 static void bnxt_fw_reset_task(struct work_struct *work)
12259 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12262 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12263 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12267 switch (bp->fw_reset_state) {
12268 case BNXT_FW_RESET_STATE_POLL_VF: {
12269 int n = bnxt_get_registered_vfs(bp);
12273 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12274 n, jiffies_to_msecs(jiffies -
12275 bp->fw_reset_timestamp));
12276 goto fw_reset_abort;
12277 } else if (n > 0) {
12278 if (bnxt_fw_reset_timeout(bp)) {
12279 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12280 bp->fw_reset_state = 0;
12281 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12285 bnxt_queue_fw_reset_work(bp, HZ / 10);
12288 bp->fw_reset_timestamp = jiffies;
12290 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12291 bnxt_fw_reset_abort(bp, rc);
12295 bnxt_fw_reset_close(bp);
12296 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12297 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12300 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12301 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12304 bnxt_queue_fw_reset_work(bp, tmo);
12307 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12310 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12311 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12312 !bnxt_fw_reset_timeout(bp)) {
12313 bnxt_queue_fw_reset_work(bp, HZ / 5);
12317 if (!bp->fw_health->primary) {
12318 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12320 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12321 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12324 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12327 case BNXT_FW_RESET_STATE_RESET_FW:
12328 bnxt_reset_all(bp);
12329 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12330 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12332 case BNXT_FW_RESET_STATE_ENABLE_DEV:
12333 bnxt_inv_fw_health_reg(bp);
12334 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12335 !bp->fw_reset_min_dsecs) {
12338 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12339 if (val == 0xffff) {
12340 if (bnxt_fw_reset_timeout(bp)) {
12341 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12343 goto fw_reset_abort;
12345 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12349 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12350 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12351 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12352 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12353 bnxt_dl_remote_reload(bp);
12354 if (pci_enable_device(bp->pdev)) {
12355 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12357 goto fw_reset_abort;
12359 pci_set_master(bp->pdev);
12360 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12362 case BNXT_FW_RESET_STATE_POLL_FW:
12363 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12364 rc = bnxt_hwrm_poll(bp);
12366 if (bnxt_fw_reset_timeout(bp)) {
12367 netdev_err(bp->dev, "Firmware reset aborted\n");
12368 goto fw_reset_abort_status;
12370 bnxt_queue_fw_reset_work(bp, HZ / 5);
12373 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12374 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12376 case BNXT_FW_RESET_STATE_OPENING:
12377 while (!rtnl_trylock()) {
12378 bnxt_queue_fw_reset_work(bp, HZ / 10);
12381 rc = bnxt_open(bp->dev);
12383 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12384 bnxt_fw_reset_abort(bp, rc);
12389 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12390 bp->fw_health->enabled) {
12391 bp->fw_health->last_fw_reset_cnt =
12392 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12394 bp->fw_reset_state = 0;
12395 /* Make sure fw_reset_state is 0 before clearing the flag */
12396 smp_mb__before_atomic();
12397 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12398 bnxt_ulp_start(bp, 0);
12399 bnxt_reenable_sriov(bp);
12400 bnxt_vf_reps_alloc(bp);
12401 bnxt_vf_reps_open(bp);
12402 bnxt_ptp_reapply_pps(bp);
12403 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12404 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12405 bnxt_dl_health_fw_recovery_done(bp);
12406 bnxt_dl_health_fw_status_update(bp, true);
12413 fw_reset_abort_status:
12414 if (bp->fw_health->status_reliable ||
12415 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12416 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12418 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12422 bnxt_fw_reset_abort(bp, rc);
12426 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12429 struct bnxt *bp = netdev_priv(dev);
12431 SET_NETDEV_DEV(dev, &pdev->dev);
12433 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12434 rc = pci_enable_device(pdev);
12436 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12440 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12441 dev_err(&pdev->dev,
12442 "Cannot find PCI device base address, aborting\n");
12444 goto init_err_disable;
12447 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12449 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12450 goto init_err_disable;
12453 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12454 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12455 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12457 goto init_err_release;
12460 pci_set_master(pdev);
12465 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12466 * determines the BAR size.
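 * Only BAR 0 (bp->bar0) and BAR 4 (bp->bar2) are mapped here; the doorbell
 * BAR is mapped later in bnxt_map_db_bar() once bp->db_size is known.
 */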
12468 bp->bar0 = pci_ioremap_bar(pdev, 0);
12470 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12472 goto init_err_release;
12475 bp->bar2 = pci_ioremap_bar(pdev, 4);
12477 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12479 goto init_err_release;
12482 pci_enable_pcie_error_reporting(pdev);
12484 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12485 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12487 spin_lock_init(&bp->ntp_fltr_lock);
12488 #if BITS_PER_LONG == 32
12489 spin_lock_init(&bp->db_lock);
12492 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12493 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12495 timer_setup(&bp->timer, bnxt_timer, 0);
12496 bp->current_interval = BNXT_TIMER_INTERVAL;
12498 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12499 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12501 clear_bit(BNXT_STATE_OPEN, &bp->state);
12505 bnxt_unmap_bars(bp, pdev);
12506 pci_release_regions(pdev);
12509 pci_disable_device(pdev);
12515 /* rtnl_lock held */
12516 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12518 struct sockaddr *addr = p;
12519 struct bnxt *bp = netdev_priv(dev);
12522 if (!is_valid_ether_addr(addr->sa_data))
12523 return -EADDRNOTAVAIL;
12525 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12528 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12532 eth_hw_addr_set(dev, addr->sa_data);
12533 if (netif_running(dev)) {
12534 bnxt_close_nic(bp, false, false);
12535 rc = bnxt_open_nic(bp, false, false);
12541 /* rtnl_lock held */
12542 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12544 struct bnxt *bp = netdev_priv(dev);
12546 if (netif_running(dev))
12547 bnxt_close_nic(bp, true, false);
12549 dev->mtu = new_mtu;
12550 bnxt_set_ring_params(bp);
12552 if (netif_running(dev))
12553 return bnxt_open_nic(bp, true, false);
12558 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12560 struct bnxt *bp = netdev_priv(dev);
12564 if (tc > bp->max_tc) {
12565 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12570 if (netdev_get_num_tc(dev) == tc)
12573 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12576 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12577 sh, tc, bp->tx_nr_rings_xdp);
12581 /* Needs to close the device and do hw resource re-allocations */
12582 if (netif_running(bp->dev))
12583 bnxt_close_nic(bp, true, false);
12586 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12587 netdev_set_num_tc(dev, tc);
12589 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12590 netdev_reset_tc(dev);
12592 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12593 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12594 bp->tx_nr_rings + bp->rx_nr_rings;
12596 if (netif_running(bp->dev))
12597 return bnxt_open_nic(bp, true, false);
12602 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12605 struct bnxt *bp = cb_priv;
12607 if (!bnxt_tc_flower_enabled(bp) ||
12608 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12609 return -EOPNOTSUPP;
12612 case TC_SETUP_CLSFLOWER:
12613 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12615 return -EOPNOTSUPP;
12619 LIST_HEAD(bnxt_block_cb_list);
12621 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12624 struct bnxt *bp = netdev_priv(dev);
12627 case TC_SETUP_BLOCK:
12628 return flow_block_cb_setup_simple(type_data,
12629 &bnxt_block_cb_list,
12630 bnxt_setup_tc_block_cb,
12632 case TC_SETUP_QDISC_MQPRIO: {
12633 struct tc_mqprio_qopt *mqprio = type_data;
12635 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12637 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12640 return -EOPNOTSUPP;
12644 #ifdef CONFIG_RFS_ACCEL
12645 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12646 struct bnxt_ntuple_filter *f2)
12648 struct flow_keys *keys1 = &f1->fkeys;
12649 struct flow_keys *keys2 = &f2->fkeys;
12651 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12652 keys1->basic.ip_proto != keys2->basic.ip_proto)
12655 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12656 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12657 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12660 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12661 sizeof(keys1->addrs.v6addrs.src)) ||
12662 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12663 sizeof(keys1->addrs.v6addrs.dst)))
12667 if (keys1->ports.ports == keys2->ports.ports &&
12668 keys1->control.flags == keys2->control.flags &&
12669 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12670 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12676 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12677 u16 rxq_index, u32 flow_id)
12679 struct bnxt *bp = netdev_priv(dev);
12680 struct bnxt_ntuple_filter *fltr, *new_fltr;
12681 struct flow_keys *fkeys;
12682 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12683 int rc = 0, idx, bit_id, l2_idx = 0;
12684 struct hlist_head *head;
12687 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12688 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12691 netif_addr_lock_bh(dev);
12692 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12693 if (ether_addr_equal(eth->h_dest,
12694 vnic->uc_list + off)) {
12699 netif_addr_unlock_bh(dev);
12703 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12707 fkeys = &new_fltr->fkeys;
12708 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12709 rc = -EPROTONOSUPPORT;
12713 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12714 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12715 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12716 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12717 rc = -EPROTONOSUPPORT;
12720 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12721 bp->hwrm_spec_code < 0x10601) {
12722 rc = -EPROTONOSUPPORT;
12725 flags = fkeys->control.flags;
12726 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12727 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12728 rc = -EPROTONOSUPPORT;
12732 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12733 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12735 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12736 head = &bp->ntp_fltr_hash_tbl[idx];
12738 hlist_for_each_entry_rcu(fltr, head, hash) {
12739 if (bnxt_fltr_match(fltr, new_fltr)) {
12747 spin_lock_bh(&bp->ntp_fltr_lock);
12748 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12749 BNXT_NTP_FLTR_MAX_FLTR, 0);
12751 spin_unlock_bh(&bp->ntp_fltr_lock);
12756 new_fltr->sw_id = (u16)bit_id;
12757 new_fltr->flow_id = flow_id;
12758 new_fltr->l2_fltr_idx = l2_idx;
12759 new_fltr->rxq = rxq_index;
12760 hlist_add_head_rcu(&new_fltr->hash, head);
12761 bp->ntp_fltr_count++;
12762 spin_unlock_bh(&bp->ntp_fltr_lock);
12764 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12765 bnxt_queue_sp_work(bp);
12767 return new_fltr->sw_id;
12774 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12778 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12779 struct hlist_head *head;
12780 struct hlist_node *tmp;
12781 struct bnxt_ntuple_filter *fltr;
12784 head = &bp->ntp_fltr_hash_tbl[i];
12785 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12788 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12789 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12792 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12797 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12802 set_bit(BNXT_FLTR_VALID, &fltr->state);
12806 spin_lock_bh(&bp->ntp_fltr_lock);
12807 hlist_del_rcu(&fltr->hash);
12808 bp->ntp_fltr_count--;
12809 spin_unlock_bh(&bp->ntp_fltr_lock);
12811 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12816 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12817 netdev_info(bp->dev, "Received PF driver unload event\n");
12822 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12826 #endif /* CONFIG_RFS_ACCEL */
12828 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12830 struct bnxt *bp = netdev_priv(netdev);
12831 struct udp_tunnel_info ti;
12834 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12835 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
12836 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12838 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12841 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12843 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12846 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12847 .sync_table = bnxt_udp_tunnel_sync,
12848 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12849 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12851 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12852 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12856 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12857 struct net_device *dev, u32 filter_mask,
12860 struct bnxt *bp = netdev_priv(dev);
12862 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12863 nlflags, filter_mask, NULL);
12866 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12867 u16 flags, struct netlink_ext_ack *extack)
12869 struct bnxt *bp = netdev_priv(dev);
12870 struct nlattr *attr, *br_spec;
12873 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12874 return -EOPNOTSUPP;
12876 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12880 nla_for_each_nested(attr, br_spec, rem) {
12883 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12886 if (nla_len(attr) < sizeof(mode))
12889 mode = nla_get_u16(attr);
12890 if (mode == bp->br_mode)
12893 rc = bnxt_hwrm_set_br_mode(bp, mode);
12895 bp->br_mode = mode;
12901 int bnxt_get_port_parent_id(struct net_device *dev,
12902 struct netdev_phys_item_id *ppid)
12904 struct bnxt *bp = netdev_priv(dev);
12906 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12907 return -EOPNOTSUPP;
12909 /* The PF and its VF-reps only support the switchdev framework */
12910 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12911 return -EOPNOTSUPP;
12913 ppid->id_len = sizeof(bp->dsn);
12914 memcpy(ppid->id, bp->dsn, ppid->id_len);
12919 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12921 struct bnxt *bp = netdev_priv(dev);
12923 return &bp->dl_port;
12926 static const struct net_device_ops bnxt_netdev_ops = {
12927 .ndo_open = bnxt_open,
12928 .ndo_start_xmit = bnxt_start_xmit,
12929 .ndo_stop = bnxt_close,
12930 .ndo_get_stats64 = bnxt_get_stats64,
12931 .ndo_set_rx_mode = bnxt_set_rx_mode,
12932 .ndo_eth_ioctl = bnxt_ioctl,
12933 .ndo_validate_addr = eth_validate_addr,
12934 .ndo_set_mac_address = bnxt_change_mac_addr,
12935 .ndo_change_mtu = bnxt_change_mtu,
12936 .ndo_fix_features = bnxt_fix_features,
12937 .ndo_set_features = bnxt_set_features,
12938 .ndo_features_check = bnxt_features_check,
12939 .ndo_tx_timeout = bnxt_tx_timeout,
12940 #ifdef CONFIG_BNXT_SRIOV
12941 .ndo_get_vf_config = bnxt_get_vf_config,
12942 .ndo_set_vf_mac = bnxt_set_vf_mac,
12943 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12944 .ndo_set_vf_rate = bnxt_set_vf_bw,
12945 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12946 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12947 .ndo_set_vf_trust = bnxt_set_vf_trust,
12949 .ndo_setup_tc = bnxt_setup_tc,
12950 #ifdef CONFIG_RFS_ACCEL
12951 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12953 .ndo_bpf = bnxt_xdp,
12954 .ndo_xdp_xmit = bnxt_xdp_xmit,
12955 .ndo_bridge_getlink = bnxt_bridge_getlink,
12956 .ndo_bridge_setlink = bnxt_bridge_setlink,
12957 .ndo_get_devlink_port = bnxt_get_devlink_port,
12960 static void bnxt_remove_one(struct pci_dev *pdev)
12962 struct net_device *dev = pci_get_drvdata(pdev);
12963 struct bnxt *bp = netdev_priv(dev);
12966 bnxt_sriov_disable(bp);
12969 devlink_port_type_clear(&bp->dl_port);
12971 bnxt_ptp_clear(bp);
12972 pci_disable_pcie_error_reporting(pdev);
12973 unregister_netdev(dev);
12974 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12975 /* Flush any pending tasks */
12976 cancel_work_sync(&bp->sp_task);
12977 cancel_delayed_work_sync(&bp->fw_reset_task);
12980 bnxt_dl_fw_reporters_destroy(bp);
12981 bnxt_dl_unregister(bp);
12982 bnxt_shutdown_tc(bp);
12984 bnxt_clear_int_mode(bp);
12985 bnxt_hwrm_func_drv_unrgtr(bp);
12986 bnxt_free_hwrm_resources(bp);
12987 bnxt_ethtool_free(bp);
12991 kfree(bp->ptp_cfg);
12992 bp->ptp_cfg = NULL;
12993 kfree(bp->fw_health);
12994 bp->fw_health = NULL;
12995 bnxt_cleanup_pci(bp);
12996 bnxt_free_ctx_mem(bp);
12999 kfree(bp->rss_indir_tbl);
13000 bp->rss_indir_tbl = NULL;
13001 bnxt_free_port_stats(bp);
13005 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13008 struct bnxt_link_info *link_info = &bp->link_info;
13011 rc = bnxt_hwrm_phy_qcaps(bp);
13013 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13017 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13018 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13020 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13024 mutex_lock(&bp->link_lock);
13025 rc = bnxt_update_link(bp, false);
13027 mutex_unlock(&bp->link_lock);
13028 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13033 /* Older firmware does not have supported_auto_speeds, so assume
13034 * that all supported speeds can be autonegotiated.
13036 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13037 link_info->support_auto_speeds = link_info->support_speeds;
13039 bnxt_init_ethtool_link_settings(bp);
13040 mutex_unlock(&bp->link_lock);
13044 static int bnxt_get_max_irq(struct pci_dev *pdev)
13048 if (!pdev->msix_cap)
13051 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
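	/* The MSI-X table size field in Message Control is encoded as N - 1 */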
13052 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13055 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13058 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13059 int max_ring_grps = 0, max_irq;
13061 *max_tx = hw_resc->max_tx_rings;
13062 *max_rx = hw_resc->max_rx_rings;
13063 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13064 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13065 bnxt_get_ulp_msix_num(bp),
13066 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13067 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13068 *max_cp = min_t(int, *max_cp, max_irq);
13069 max_ring_grps = hw_resc->max_hw_ring_grps;
13070 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13074 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13076 if (bp->flags & BNXT_FLAG_CHIP_P5) {
13077 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13078 /* On P5 chips, the max_cp output parameter is the number of available NQs */
13081 *max_rx = min_t(int, *max_rx, max_ring_grps);
13084 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13088 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13091 if (!rx || !tx || !cp)
13094 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13097 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13102 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13103 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13104 /* Not enough rings, try disabling agg rings. */
13105 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13106 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13108 /* set BNXT_FLAG_AGG_RINGS back for consistency */
13109 bp->flags |= BNXT_FLAG_AGG_RINGS;
13112 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13113 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13114 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13115 bnxt_set_ring_params(bp);
13118 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13119 int max_cp, max_stat, max_irq;
13121 /* Reserve minimum resources for RoCE */
13122 max_cp = bnxt_get_max_func_cp_rings(bp);
13123 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13124 max_irq = bnxt_get_max_func_irqs(bp);
13125 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13126 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13127 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13130 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13131 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13132 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13133 max_cp = min_t(int, max_cp, max_irq);
13134 max_cp = min_t(int, max_cp, max_stat);
13135 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13142 /* In the initial default shared ring setting, each shared ring must have a
 * matching RX/TX ring pair.
 */
13145 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13147 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13148 bp->rx_nr_rings = bp->cp_nr_rings;
13149 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13150 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13153 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13155 int dflt_rings, max_rx_rings, max_tx_rings, rc;
13157 if (!bnxt_can_reserve_rings(bp))
13161 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13162 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13163 /* Reduce default rings on multi-port cards so that total default
13164 * rings do not exceed CPU count.
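 * e.g. a 2-port adapter on a host with 16 online CPUs is limited to at
 * most 8 default rings per port.
 */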
13166 if (bp->port_count > 1) {
13168 max_t(int, num_online_cpus() / bp->port_count, 1);
13170 dflt_rings = min_t(int, dflt_rings, max_rings);
13172 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13175 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13176 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13178 bnxt_trim_dflt_sh_rings(bp);
13180 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13181 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13183 rc = __bnxt_reserve_rings(bp);
13184 if (rc && rc != -ENODEV)
13185 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13186 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13188 bnxt_trim_dflt_sh_rings(bp);
13190 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13191 if (bnxt_need_reserve_rings(bp)) {
13192 rc = __bnxt_reserve_rings(bp);
13193 if (rc && rc != -ENODEV)
13194 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13195 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13197 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13202 bp->tx_nr_rings = 0;
13203 bp->rx_nr_rings = 0;
13208 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13212 if (bp->tx_nr_rings)
13215 bnxt_ulp_irq_stop(bp);
13216 bnxt_clear_int_mode(bp);
13217 rc = bnxt_set_dflt_rings(bp, true);
13219 if (BNXT_VF(bp) && rc == -ENODEV)
13220 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13222 netdev_err(bp->dev, "Not enough rings available.\n");
13223 goto init_dflt_ring_err;
13225 rc = bnxt_init_int_mode(bp);
13227 goto init_dflt_ring_err;
13229 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13230 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13231 bp->flags |= BNXT_FLAG_RFS;
13232 bp->dev->features |= NETIF_F_NTUPLE;
13234 init_dflt_ring_err:
13235 bnxt_ulp_irq_restart(bp, rc);
13239 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13244 bnxt_hwrm_func_qcaps(bp);
13246 if (netif_running(bp->dev))
13247 __bnxt_close_nic(bp, true, false);
13249 bnxt_ulp_irq_stop(bp);
13250 bnxt_clear_int_mode(bp);
13251 rc = bnxt_init_int_mode(bp);
13252 bnxt_ulp_irq_restart(bp, rc);
13254 if (netif_running(bp->dev)) {
13256 dev_close(bp->dev);
13258 rc = bnxt_open_nic(bp, true, false);
13264 static int bnxt_init_mac_addr(struct bnxt *bp)
13269 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13271 #ifdef CONFIG_BNXT_SRIOV
13272 struct bnxt_vf_info *vf = &bp->vf;
13273 bool strict_approval = true;
13275 if (is_valid_ether_addr(vf->mac_addr)) {
13276 /* overwrite netdev dev_addr with admin VF MAC */
13277 eth_hw_addr_set(bp->dev, vf->mac_addr);
13278 /* Older PF driver or firmware may not approve this
 * MAC address correctly, so do not insist on approval.
 */
13281 strict_approval = false;
13283 eth_hw_addr_random(bp->dev);
13285 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13291 static void bnxt_vpd_read_info(struct bnxt *bp)
13293 struct pci_dev *pdev = bp->pdev;
13294 unsigned int vpd_size, kw_len;
13298 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13299 if (IS_ERR(vpd_data)) {
13300 pci_warn(pdev, "Unable to read VPD\n");
13304 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13305 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13309 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13310 memcpy(bp->board_partno, &vpd_data[pos], size);
13313 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13314 PCI_VPD_RO_KEYWORD_SERIALNO,
13319 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13320 memcpy(bp->board_serialno, &vpd_data[pos], size);
13325 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13327 struct pci_dev *pdev = bp->pdev;
13330 qword = pci_get_dsn(pdev);
13332 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13333 return -EOPNOTSUPP;
13336 put_unaligned_le64(qword, dsn);
13338 bp->flags |= BNXT_FLAG_DSN_VALID;
13342 static int bnxt_map_db_bar(struct bnxt *bp)
13346 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13352 void bnxt_print_device_info(struct bnxt *bp)
13354 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13355 board_info[bp->board_idx].name,
13356 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13358 pcie_print_link_status(bp->pdev);
13361 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13363 struct net_device *dev;
13367 if (pci_is_bridge(pdev))
13370 /* Clear any pending DMA transactions from crash kernel
13371 * while loading driver in capture kernel.
13373 if (is_kdump_kernel()) {
13374 pci_clear_master(pdev);
13378 max_irqs = bnxt_get_max_irq(pdev);
13379 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13383 bp = netdev_priv(dev);
13384 bp->board_idx = ent->driver_data;
13385 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13386 bnxt_set_max_func_irqs(bp, max_irqs);
13388 if (bnxt_vf_pciid(bp->board_idx))
13389 bp->flags |= BNXT_FLAG_VF;
13391 if (pdev->msix_cap)
13392 bp->flags |= BNXT_FLAG_MSIX_CAP;
13394 rc = bnxt_init_board(pdev, dev);
13396 goto init_err_free;
13398 dev->netdev_ops = &bnxt_netdev_ops;
13399 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13400 dev->ethtool_ops = &bnxt_ethtool_ops;
13401 pci_set_drvdata(pdev, dev);
13403 rc = bnxt_alloc_hwrm_resources(bp);
13405 goto init_err_pci_clean;
13407 mutex_init(&bp->hwrm_cmd_lock);
13408 mutex_init(&bp->link_lock);
13410 rc = bnxt_fw_init_one_p1(bp);
13412 goto init_err_pci_clean;
13415 bnxt_vpd_read_info(bp);
13417 if (BNXT_CHIP_P5(bp)) {
13418 bp->flags |= BNXT_FLAG_CHIP_P5;
13419 if (BNXT_CHIP_SR2(bp))
13420 bp->flags |= BNXT_FLAG_CHIP_SR2;
13423 rc = bnxt_alloc_rss_indir_tbl(bp);
13425 goto init_err_pci_clean;
13427 rc = bnxt_fw_init_one_p2(bp);
13429 goto init_err_pci_clean;
13431 rc = bnxt_map_db_bar(bp);
13433 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13435 goto init_err_pci_clean;
13438 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13439 NETIF_F_TSO | NETIF_F_TSO6 |
13440 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13441 NETIF_F_GSO_IPXIP4 |
13442 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13443 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13444 NETIF_F_RXCSUM | NETIF_F_GRO;
13446 if (BNXT_SUPPORTS_TPA(bp))
13447 dev->hw_features |= NETIF_F_LRO;
13449 dev->hw_enc_features =
13450 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13451 NETIF_F_TSO | NETIF_F_TSO6 |
13452 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13453 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13454 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13455 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13457 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13458 NETIF_F_GSO_GRE_CSUM;
13459 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13460 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13461 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13462 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13463 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13464 if (BNXT_SUPPORTS_TPA(bp))
13465 dev->hw_features |= NETIF_F_GRO_HW;
13466 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13467 if (dev->features & NETIF_F_GRO_HW)
13468 dev->features &= ~NETIF_F_LRO;
13469 dev->priv_flags |= IFF_UNICAST_FLT;
13471 #ifdef CONFIG_BNXT_SRIOV
13472 init_waitqueue_head(&bp->sriov_cfg_wait);
13474 if (BNXT_SUPPORTS_TPA(bp)) {
13475 bp->gro_func = bnxt_gro_func_5730x;
13476 if (BNXT_CHIP_P4(bp))
13477 bp->gro_func = bnxt_gro_func_5731x;
13478 else if (BNXT_CHIP_P5(bp))
13479 bp->gro_func = bnxt_gro_func_5750x;
13481 if (!BNXT_CHIP_P4_PLUS(bp))
13482 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13484 rc = bnxt_init_mac_addr(bp);
13486 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13487 rc = -EADDRNOTAVAIL;
13488 goto init_err_pci_clean;
13492 /* Read the adapter's DSN to use as the eswitch switch_id */
13493 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13496 /* MTU range: 60 - FW defined max */
13497 dev->min_mtu = ETH_ZLEN;
13498 dev->max_mtu = bp->max_mtu;
13500 rc = bnxt_probe_phy(bp, true);
13502 goto init_err_pci_clean;
13504 bnxt_set_rx_skb_mode(bp, false);
13505 bnxt_set_tpa_flags(bp);
13506 bnxt_set_ring_params(bp);
13507 rc = bnxt_set_dflt_rings(bp, true);
13509 if (BNXT_VF(bp) && rc == -ENODEV) {
13510 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13512 netdev_err(bp->dev, "Not enough rings available.\n");
13515 goto init_err_pci_clean;
13518 bnxt_fw_init_one_p3(bp);
13520 bnxt_init_dflt_coal(bp);
13522 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13523 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13525 rc = bnxt_init_int_mode(bp);
13527 goto init_err_pci_clean;
13529 /* No TC has been set yet and rings may have been trimmed due to
13530 * limited MSIX, so we re-initialize the TX rings per TC.
13532 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13537 create_singlethread_workqueue("bnxt_pf_wq");
13539 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13541 goto init_err_pci_clean;
13544 rc = bnxt_init_tc(bp);
13546 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13550 bnxt_inv_fw_health_reg(bp);
13551 rc = bnxt_dl_register(bp);
13555 rc = register_netdev(dev);
13557 goto init_err_cleanup;
13560 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13561 bnxt_dl_fw_reporters_create(bp);
13563 bnxt_print_device_info(bp);
13565 pci_save_state(pdev);
13569 bnxt_dl_unregister(bp);
13571 bnxt_shutdown_tc(bp);
13572 bnxt_clear_int_mode(bp);
13574 init_err_pci_clean:
13575 bnxt_hwrm_func_drv_unrgtr(bp);
13576 bnxt_free_hwrm_resources(bp);
13577 bnxt_ethtool_free(bp);
13578 bnxt_ptp_clear(bp);
13579 kfree(bp->ptp_cfg);
13580 bp->ptp_cfg = NULL;
13581 kfree(bp->fw_health);
13582 bp->fw_health = NULL;
13583 bnxt_cleanup_pci(bp);
13584 bnxt_free_ctx_mem(bp);
13587 kfree(bp->rss_indir_tbl);
13588 bp->rss_indir_tbl = NULL;
13595 static void bnxt_shutdown(struct pci_dev *pdev)
13597 struct net_device *dev = pci_get_drvdata(pdev);
13604 bp = netdev_priv(dev);
13606 goto shutdown_exit;
13608 if (netif_running(dev))
13611 bnxt_ulp_shutdown(bp);
13612 bnxt_clear_int_mode(bp);
13613 pci_disable_device(pdev);
13615 if (system_state == SYSTEM_POWER_OFF) {
13616 pci_wake_from_d3(pdev, bp->wol);
13617 pci_set_power_state(pdev, PCI_D3hot);
13624 #ifdef CONFIG_PM_SLEEP
13625 static int bnxt_suspend(struct device *device)
13627 struct net_device *dev = dev_get_drvdata(device);
13628 struct bnxt *bp = netdev_priv(dev);
13633 if (netif_running(dev)) {
13634 netif_device_detach(dev);
13635 rc = bnxt_close(dev);
13637 bnxt_hwrm_func_drv_unrgtr(bp);
13638 pci_disable_device(bp->pdev);
13639 bnxt_free_ctx_mem(bp);
13646 static int bnxt_resume(struct device *device)
13648 struct net_device *dev = dev_get_drvdata(device);
13649 struct bnxt *bp = netdev_priv(dev);
13653 rc = pci_enable_device(bp->pdev);
13655 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13659 pci_set_master(bp->pdev);
13660 if (bnxt_hwrm_ver_get(bp)) {
13664 rc = bnxt_hwrm_func_reset(bp);
13670 rc = bnxt_hwrm_func_qcaps(bp);
13674 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13679 bnxt_get_wol_settings(bp);
13680 if (netif_running(dev)) {
13681 rc = bnxt_open(dev);
13683 netif_device_attach(dev);
13687 bnxt_ulp_start(bp, rc);
13689 bnxt_reenable_sriov(bp);
13694 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13695 #define BNXT_PM_OPS (&bnxt_pm_ops)
13699 #define BNXT_PM_OPS NULL
13701 #endif /* CONFIG_PM_SLEEP */
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0, off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only when the BARs
		 * are rewritten.
		 *
		 * As pci_restore_state() does not re-write the BARs if the
		 * value is the same as the previously saved value, the driver
		 * writes the BARs to 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	return result;
}
/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}
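/* Hook the recovery callbacks above into the PCI core. */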
static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};
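/* Module entry points: register the PCI driver on load; on unload,
 * unregister it and destroy the PF workqueue created in bnxt_init_one().
 */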
static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);