1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/module.h>
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/ptp_clock_kernel.h>
53 #include <linux/timecounter.h>
54 #include <linux/cpu_rmap.h>
55 #include <linux/cpumask.h>
56 #include <net/pkt_cls.h>
57 #include <linux/hwmon.h>
58 #include <linux/hwmon-sysfs.h>
59 #include <net/page_pool.h>
63 #include "bnxt_hwrm.h"
65 #include "bnxt_sriov.h"
66 #include "bnxt_ethtool.h"
72 #include "bnxt_devlink.h"
73 #include "bnxt_debugfs.h"
75 #define BNXT_TX_TIMEOUT (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84 #define BNXT_RX_COPY_THRESH 256
86 #define BNXT_TX_PUSH_THRESH 164
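/* Frames no longer than the push threshold may be written inline
 * ("pushed") through the doorbell BAR in bnxt_start_xmit() instead of
 * being DMA mapped, provided the TX ring is otherwise empty.
 */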
133 NETXTREME_E_P5_VF_HV,
136 /* indexed by enum above */
137 static const struct {
140 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
141 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
142 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
143 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
144 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
145 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
146 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
147 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
148 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
149 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
151 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
152 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
153 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
154 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
155 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
156 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
157 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
158 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
159 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
160 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
161 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
162 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
163 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
164 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
165 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
166 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
167 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
168 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
169 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
170 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
171 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
172 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
173 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
174 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
175 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
176 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
177 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
178 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
179 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
180 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
181 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
182 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
183 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
184 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
187 static const struct pci_device_id bnxt_pci_tbl[] = {
188 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
191 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
193 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
194 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
195 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
197 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
198 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
199 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
200 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
201 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
202 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
203 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
204 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
205 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
206 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
207 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
208 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
209 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
210 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
211 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
212 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
213 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
214 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
215 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
216 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
217 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
218 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
220 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
221 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
222 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
223 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
224 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
225 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
226 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
227 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
228 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
229 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
230 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
231 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
232 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
233 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
234 #ifdef CONFIG_BNXT_SRIOV
235 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
236 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
237 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
238 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
239 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
240 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
241 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
242 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
243 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
244 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
245 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
246 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
247 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
248 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
249 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
250 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
251 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
252 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
253 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
254 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
255 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
260 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
262 static const u16 bnxt_vf_req_snif[] = {
266 HWRM_CFA_L2_FILTER_ALLOC,
269 static const u16 bnxt_async_events_arr[] = {
270 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
271 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
272 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
273 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
274 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
275 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
276 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
277 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
278 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
279 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
280 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
281 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
282 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
283 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
284 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
287 static struct workqueue_struct *bnxt_pf_wq;
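/* Returns true if the board index from the PCI device table identifies
 * a virtual function (VF), including the Hyper-V and P5 VF variants.
 */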
289 static bool bnxt_vf_pciid(enum board_idx idx)
291 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
292 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
293 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
294 idx == NETXTREME_E_P5_VF_HV);
297 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
298 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
299 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
301 #define BNXT_CP_DB_IRQ_DIS(db) \
302 writel(DB_CP_IRQ_DIS_FLAGS, db)
304 #define BNXT_DB_CQ(db, idx) \
305 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
307 #define BNXT_DB_NQ_P5(db, idx) \
308 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
310 #define BNXT_DB_CQ_ARM(db, idx) \
311 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
313 #define BNXT_DB_NQ_ARM_P5(db, idx) \
314 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
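/* Doorbell helpers: P5 (BNXT_FLAG_CHIP_P5) chips use 64-bit writeq
 * doorbells carrying the ring's db_key64 and a type field, while older
 * chips use the legacy 32-bit writel doorbells built from the DB_CP_*
 * flag sets above.
 */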
316 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
318 if (bp->flags & BNXT_FLAG_CHIP_P5)
319 BNXT_DB_NQ_P5(db, idx);
324 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
326 if (bp->flags & BNXT_FLAG_CHIP_P5)
327 BNXT_DB_NQ_ARM_P5(db, idx);
329 BNXT_DB_CQ_ARM(db, idx);
332 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
334 if (bp->flags & BNXT_FLAG_CHIP_P5)
335 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
341 const u16 bnxt_lhint_arr[] = {
342 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
343 TX_BD_FLAGS_LHINT_512_TO_1023,
344 TX_BD_FLAGS_LHINT_1024_TO_2047,
345 TX_BD_FLAGS_LHINT_1024_TO_2047,
346 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
347 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
348 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
354 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
355 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
360 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
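/* bnxt_lhint_arr[] above maps the packet length in 512-byte units to
 * the LHINT (length hint) field of the TX BD, a coarse packet-size
 * hint for the hardware.
 */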
363 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
365 struct metadata_dst *md_dst = skb_metadata_dst(skb);
367 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
370 return md_dst->u.port_info.port_id;
373 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
376 bnxt_db_write(bp, &txr->tx_db, prod);
377 txr->kick_pending = 0;
380 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
381 struct bnxt_tx_ring_info *txr,
382 struct netdev_queue *txq)
384 netif_tx_stop_queue(txq);
386 /* netif_tx_stop_queue() must be done before checking
387 * tx index in bnxt_tx_avail() below, because in
388 * bnxt_tx_int(), we update tx index before checking for
389 * netif_tx_queue_stopped().
392 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
393 netif_tx_wake_queue(txq);
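/* Main transmit entry point.  Builds a long TX BD chain for the skb:
 * VLAN and CFA action metadata, optional PTP TX timestamping, TSO and
 * checksum offload flags are set up, then the packet is either pushed
 * inline through the doorbell (small frames on an empty ring) or DMA
 * mapped head plus fragments.  The doorbell kick is deferred while
 * netdev_xmit_more() indicates more packets are coming.
 */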
400 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
402 struct bnxt *bp = netdev_priv(dev);
404 struct tx_bd_ext *txbd1;
405 struct netdev_queue *txq;
408 unsigned int length, pad = 0;
409 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
411 struct pci_dev *pdev = bp->pdev;
412 struct bnxt_tx_ring_info *txr;
413 struct bnxt_sw_tx_bd *tx_buf;
416 i = skb_get_queue_mapping(skb);
417 if (unlikely(i >= bp->tx_nr_rings)) {
418 dev_kfree_skb_any(skb);
419 atomic_long_inc(&dev->tx_dropped);
423 txq = netdev_get_tx_queue(dev, i);
424 txr = &bp->tx_ring[bp->tx_ring_map[i]];
427 free_size = bnxt_tx_avail(bp, txr);
428 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
429 /* We must have raced with NAPI cleanup */
430 if (net_ratelimit() && txr->kick_pending)
431 netif_warn(bp, tx_err, dev,
432 "bnxt: ring busy w/ flush pending!\n");
433 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
434 return NETDEV_TX_BUSY;
438 len = skb_headlen(skb);
439 last_frag = skb_shinfo(skb)->nr_frags;
441 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
443 txbd->tx_bd_opaque = prod;
445 tx_buf = &txr->tx_buf_ring[prod];
447 tx_buf->nr_frags = last_frag;
450 cfa_action = bnxt_xmit_get_cfa_action(skb);
451 if (skb_vlan_tag_present(skb)) {
452 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
453 skb_vlan_tag_get(skb);
454 /* Currently supports 802.1Q and 802.1ad VLAN offloads.
455 * QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
457 if (skb->vlan_proto == htons(ETH_P_8021Q))
458 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
461 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
462 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
464 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
465 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
466 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
469 ptp->tx_hdr_off += VLAN_HLEN;
470 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
471 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
473 atomic_inc(&bp->ptp_cfg->tx_avail);
478 if (unlikely(skb->no_fcs))
479 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
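/* Push mode: when the ring is completely empty and the frame fits
 * within tx_push_thresh, copy the BDs and packet data into the push
 * buffer and write them directly to the doorbell BAR with
 * __iowrite64_copy(), avoiding a DMA mapping for the payload.
 */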
481 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
483 struct tx_push_buffer *tx_push_buf = txr->tx_push;
484 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
485 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
486 void __iomem *db = txr->tx_db.doorbell;
487 void *pdata = tx_push_buf->data;
491 /* Set COAL_NOW to be ready quickly for the next push */
492 tx_push->tx_bd_len_flags_type =
493 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
494 TX_BD_TYPE_LONG_TX_BD |
495 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
496 TX_BD_FLAGS_COAL_NOW |
497 TX_BD_FLAGS_PACKET_END |
498 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
500 if (skb->ip_summed == CHECKSUM_PARTIAL)
501 tx_push1->tx_bd_hsize_lflags =
502 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
504 tx_push1->tx_bd_hsize_lflags = 0;
506 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
507 tx_push1->tx_bd_cfa_action =
508 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
510 end = pdata + length;
511 end = PTR_ALIGN(end, 8) - 1;
514 skb_copy_from_linear_data(skb, pdata, len);
516 for (j = 0; j < last_frag; j++) {
517 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
520 fptr = skb_frag_address_safe(frag);
524 memcpy(pdata, fptr, skb_frag_size(frag));
525 pdata += skb_frag_size(frag);
528 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
529 txbd->tx_bd_haddr = txr->data_mapping;
530 prod = NEXT_TX(prod);
531 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
532 memcpy(txbd, tx_push1, sizeof(*txbd));
533 prod = NEXT_TX(prod);
535 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
539 netdev_tx_sent_queue(txq, skb->len);
540 wmb(); /* Sync is_push and byte queue before pushing data */
542 push_len = (length + sizeof(*tx_push) + 7) / 8;
544 __iowrite64_copy(db, tx_push_buf, 16);
545 __iowrite32_copy(db + 4, tx_push_buf + 1,
546 (push_len - 16) << 1);
548 __iowrite64_copy(db, tx_push_buf, push_len);
555 if (length < BNXT_MIN_PKT_SIZE) {
556 pad = BNXT_MIN_PKT_SIZE - length;
557 if (skb_pad(skb, pad))
558 /* SKB already freed. */
559 goto tx_kick_pending;
560 length = BNXT_MIN_PKT_SIZE;
563 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
565 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
568 dma_unmap_addr_set(tx_buf, mapping, mapping);
569 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
570 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
572 txbd->tx_bd_haddr = cpu_to_le64(mapping);
574 prod = NEXT_TX(prod);
575 txbd1 = (struct tx_bd_ext *)
576 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
578 txbd1->tx_bd_hsize_lflags = lflags;
579 if (skb_is_gso(skb)) {
582 if (skb->encapsulation)
583 hdr_len = skb_inner_network_offset(skb) +
584 skb_inner_network_header_len(skb) +
585 inner_tcp_hdrlen(skb);
587 hdr_len = skb_transport_offset(skb) +
590 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
592 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
593 length = skb_shinfo(skb)->gso_size;
594 txbd1->tx_bd_mss = cpu_to_le32(length);
596 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
597 txbd1->tx_bd_hsize_lflags |=
598 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
599 txbd1->tx_bd_mss = 0;
603 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
604 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
609 flags |= bnxt_lhint_arr[length];
610 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
612 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
613 txbd1->tx_bd_cfa_action =
614 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
615 for (i = 0; i < last_frag; i++) {
616 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
618 prod = NEXT_TX(prod);
619 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
621 len = skb_frag_size(frag);
622 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
625 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
628 tx_buf = &txr->tx_buf_ring[prod];
629 dma_unmap_addr_set(tx_buf, mapping, mapping);
631 txbd->tx_bd_haddr = cpu_to_le64(mapping);
633 flags = len << TX_BD_LEN_SHIFT;
634 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
638 txbd->tx_bd_len_flags_type =
639 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
640 TX_BD_FLAGS_PACKET_END);
642 netdev_tx_sent_queue(txq, skb->len);
644 skb_tx_timestamp(skb);
646 /* Sync BD data before updating doorbell */
649 prod = NEXT_TX(prod);
652 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
653 bnxt_txr_db_kick(bp, txr, prod);
655 txr->kick_pending = 1;
659 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
660 if (netdev_xmit_more() && !tx_buf->is_push)
661 bnxt_txr_db_kick(bp, txr, prod);
663 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
668 if (BNXT_TX_PTP_IS_SET(lflags))
669 atomic_inc(&bp->ptp_cfg->tx_avail);
673 /* start back at the beginning and unmap the skb */
675 tx_buf = &txr->tx_buf_ring[prod];
676 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
677 skb_headlen(skb), DMA_TO_DEVICE);
678 prod = NEXT_TX(prod);
680 /* unmap remaining mapped pages */
681 for (i = 0; i < last_frag; i++) {
682 prod = NEXT_TX(prod);
683 tx_buf = &txr->tx_buf_ring[prod];
684 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
685 skb_frag_size(&skb_shinfo(skb)->frags[i]),
690 dev_kfree_skb_any(skb);
692 if (txr->kick_pending)
693 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
694 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
695 atomic_long_inc(&dev->tx_dropped);
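/* TX completion processing: unmap the head and fragment DMA mappings
 * for each completed packet, collect any requested PTP TX timestamp,
 * free the skb and update BQL accounting, then wake the TX queue if it
 * was stopped and enough descriptors have been freed.
 */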
699 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
701 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
702 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
703 u16 cons = txr->tx_cons;
704 struct pci_dev *pdev = bp->pdev;
706 unsigned int tx_bytes = 0;
708 for (i = 0; i < nr_pkts; i++) {
709 struct bnxt_sw_tx_bd *tx_buf;
710 bool compl_deferred = false;
714 tx_buf = &txr->tx_buf_ring[cons];
715 cons = NEXT_TX(cons);
719 if (tx_buf->is_push) {
724 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
725 skb_headlen(skb), DMA_TO_DEVICE);
726 last = tx_buf->nr_frags;
728 for (j = 0; j < last; j++) {
729 cons = NEXT_TX(cons);
730 tx_buf = &txr->tx_buf_ring[cons];
733 dma_unmap_addr(tx_buf, mapping),
734 skb_frag_size(&skb_shinfo(skb)->frags[j]),
737 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
738 if (bp->flags & BNXT_FLAG_CHIP_P5) {
739 if (!bnxt_get_tx_ts_p5(bp, skb))
740 compl_deferred = true;
742 atomic_inc(&bp->ptp_cfg->tx_avail);
747 cons = NEXT_TX(cons);
749 tx_bytes += skb->len;
751 dev_kfree_skb_any(skb);
754 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
757 /* Need to make the tx_cons update visible to bnxt_start_xmit()
758 * before checking for netif_tx_queue_stopped(). Without the
759 * memory barrier, there is a small possibility that bnxt_start_xmit()
760 * will miss it and cause the queue to be stopped forever.
764 if (unlikely(netif_tx_queue_stopped(txq)) &&
765 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
766 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
767 netif_tx_wake_queue(txq);
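/* Allocate an RX buffer page from the ring's page_pool and DMA map it.
 * The returned mapping is adjusted by rx_dma_offset.
 */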
770 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
771 struct bnxt_rx_ring_info *rxr,
774 struct device *dev = &bp->pdev->dev;
777 page = page_pool_dev_alloc_pages(rxr->page_pool);
781 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
782 DMA_ATTR_WEAK_ORDERING);
783 if (dma_mapping_error(dev, *mapping)) {
784 page_pool_recycle_direct(rxr->page_pool, page);
787 *mapping += bp->rx_dma_offset;
791 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
795 struct pci_dev *pdev = bp->pdev;
797 data = kmalloc(bp->rx_buf_size, gfp);
801 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
802 bp->rx_buf_use_size, bp->rx_dir,
803 DMA_ATTR_WEAK_ORDERING);
805 if (dma_mapping_error(&pdev->dev, *mapping)) {
812 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
815 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
816 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
819 if (BNXT_RX_PAGE_MODE(bp)) {
821 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
827 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
829 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
835 rx_buf->data_ptr = data + bp->rx_offset;
837 rx_buf->mapping = mapping;
839 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
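/* Recycle an RX buffer: when an skb cannot be built for a completed
 * packet, move the buffer from the consumer slot back into the current
 * producer slot so the ring stays populated.
 */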
843 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
845 u16 prod = rxr->rx_prod;
846 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
847 struct rx_bd *cons_bd, *prod_bd;
849 prod_rx_buf = &rxr->rx_buf_ring[prod];
850 cons_rx_buf = &rxr->rx_buf_ring[cons];
852 prod_rx_buf->data = data;
853 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
855 prod_rx_buf->mapping = cons_rx_buf->mapping;
857 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
858 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
860 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
863 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
865 u16 next, max = rxr->rx_agg_bmap_size;
867 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
869 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
873 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
874 struct bnxt_rx_ring_info *rxr,
878 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
879 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
880 struct pci_dev *pdev = bp->pdev;
883 u16 sw_prod = rxr->rx_sw_agg_prod;
884 unsigned int offset = 0;
886 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
889 page = alloc_page(gfp);
893 rxr->rx_page_offset = 0;
895 offset = rxr->rx_page_offset;
896 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
897 if (rxr->rx_page_offset == PAGE_SIZE)
902 page = alloc_page(gfp);
907 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
908 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
909 DMA_ATTR_WEAK_ORDERING);
910 if (dma_mapping_error(&pdev->dev, mapping)) {
915 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
916 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
918 __set_bit(sw_prod, rxr->rx_agg_bmap);
919 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
920 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
922 rx_agg_buf->page = page;
923 rx_agg_buf->offset = offset;
924 rx_agg_buf->mapping = mapping;
925 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
926 rxbd->rx_bd_opaque = sw_prod;
930 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
931 struct bnxt_cp_ring_info *cpr,
932 u16 cp_cons, u16 curr)
934 struct rx_agg_cmp *agg;
936 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
937 agg = (struct rx_agg_cmp *)
938 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
942 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
943 struct bnxt_rx_ring_info *rxr,
944 u16 agg_id, u16 curr)
946 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
948 return &tpa_info->agg_arr[curr];
951 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
952 u16 start, u32 agg_bufs, bool tpa)
954 struct bnxt_napi *bnapi = cpr->bnapi;
955 struct bnxt *bp = bnapi->bp;
956 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
957 u16 prod = rxr->rx_agg_prod;
958 u16 sw_prod = rxr->rx_sw_agg_prod;
962 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
965 for (i = 0; i < agg_bufs; i++) {
967 struct rx_agg_cmp *agg;
968 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
969 struct rx_bd *prod_bd;
973 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
975 agg = bnxt_get_agg(bp, cpr, idx, start + i);
976 cons = agg->rx_agg_cmp_opaque;
977 __clear_bit(cons, rxr->rx_agg_bmap);
979 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
980 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
982 __set_bit(sw_prod, rxr->rx_agg_bmap);
983 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
984 cons_rx_buf = &rxr->rx_agg_ring[cons];
986 /* It is possible for sw_prod to be equal to cons, so
987 * set cons_rx_buf->page to NULL first.
989 page = cons_rx_buf->page;
990 cons_rx_buf->page = NULL;
991 prod_rx_buf->page = page;
992 prod_rx_buf->offset = cons_rx_buf->offset;
994 prod_rx_buf->mapping = cons_rx_buf->mapping;
996 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
998 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
999 prod_bd->rx_bd_opaque = sw_prod;
1001 prod = NEXT_RX_AGG(prod);
1002 sw_prod = NEXT_RX_AGG(sw_prod);
1004 rxr->rx_agg_prod = prod;
1005 rxr->rx_sw_agg_prod = sw_prod;
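/* Build an skb for a page-mode RX buffer: replenish the ring slot,
 * unmap the page and release it from the page_pool, copy the header
 * bytes into a freshly allocated skb and attach the remainder of the
 * page as a frag.
 */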
1008 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1009 struct bnxt_rx_ring_info *rxr,
1010 u16 cons, void *data, u8 *data_ptr,
1011 dma_addr_t dma_addr,
1012 unsigned int offset_and_len)
1014 unsigned int payload = offset_and_len >> 16;
1015 unsigned int len = offset_and_len & 0xffff;
1017 struct page *page = data;
1018 u16 prod = rxr->rx_prod;
1019 struct sk_buff *skb;
1022 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1023 if (unlikely(err)) {
1024 bnxt_reuse_rx_data(rxr, cons, data);
1027 dma_addr -= bp->rx_dma_offset;
1028 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1029 DMA_ATTR_WEAK_ORDERING);
1030 page_pool_release_page(rxr->page_pool, page);
1032 if (unlikely(!payload))
1033 payload = eth_get_headlen(bp->dev, data_ptr, len);
1035 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1041 off = (void *)data_ptr - page_address(page);
1042 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1043 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1044 payload + NET_IP_ALIGN);
1046 frag = &skb_shinfo(skb)->frags[0];
1047 skb_frag_size_sub(frag, payload);
1048 skb_frag_off_add(frag, payload);
1049 skb->data_len -= payload;
1050 skb->tail += payload;
1055 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1056 struct bnxt_rx_ring_info *rxr, u16 cons,
1057 void *data, u8 *data_ptr,
1058 dma_addr_t dma_addr,
1059 unsigned int offset_and_len)
1061 u16 prod = rxr->rx_prod;
1062 struct sk_buff *skb;
1065 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1066 if (unlikely(err)) {
1067 bnxt_reuse_rx_data(rxr, cons, data);
1071 skb = build_skb(data, 0);
1072 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1073 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1079 skb_reserve(skb, bp->rx_offset);
1080 skb_put(skb, offset_and_len & 0xffff);
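/* Attach aggregation ring pages to the skb as frags for jumbo or TPA
 * packets.  If a replacement page cannot be allocated for a slot, the
 * current frag is dropped and the remaining aggregation buffers are
 * recycled.
 */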
1084 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1085 struct bnxt_cp_ring_info *cpr,
1086 struct sk_buff *skb, u16 idx,
1087 u32 agg_bufs, bool tpa)
1089 struct bnxt_napi *bnapi = cpr->bnapi;
1090 struct pci_dev *pdev = bp->pdev;
1091 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1092 u16 prod = rxr->rx_agg_prod;
1093 bool p5_tpa = false;
1096 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1099 for (i = 0; i < agg_bufs; i++) {
1101 struct rx_agg_cmp *agg;
1102 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1107 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1109 agg = bnxt_get_agg(bp, cpr, idx, i);
1110 cons = agg->rx_agg_cmp_opaque;
1111 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1112 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1114 cons_rx_buf = &rxr->rx_agg_ring[cons];
1115 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1116 cons_rx_buf->offset, frag_len);
1117 __clear_bit(cons, rxr->rx_agg_bmap);
1119 /* It is possible for bnxt_alloc_rx_page() to allocate
1120 * a sw_prod index that equals the cons index, so we
1121 * need to clear the cons entry now.
1123 mapping = cons_rx_buf->mapping;
1124 page = cons_rx_buf->page;
1125 cons_rx_buf->page = NULL;
1127 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1128 struct skb_shared_info *shinfo;
1129 unsigned int nr_frags;
1131 shinfo = skb_shinfo(skb);
1132 nr_frags = --shinfo->nr_frags;
1133 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1137 cons_rx_buf->page = page;
1139 /* Update prod since some pages may already have been
1140 * allocated.
1142 rxr->rx_agg_prod = prod;
1143 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1147 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1149 DMA_ATTR_WEAK_ORDERING);
1151 skb->data_len += frag_len;
1152 skb->len += frag_len;
1153 skb->truesize += PAGE_SIZE;
1155 prod = NEXT_RX_AGG(prod);
1157 rxr->rx_agg_prod = prod;
1161 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1162 u8 agg_bufs, u32 *raw_cons)
1165 struct rx_agg_cmp *agg;
1167 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1168 last = RING_CMP(*raw_cons);
1169 agg = (struct rx_agg_cmp *)
1170 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1171 return RX_AGG_CMP_VALID(agg, *raw_cons);
1174 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1178 struct bnxt *bp = bnapi->bp;
1179 struct pci_dev *pdev = bp->pdev;
1180 struct sk_buff *skb;
1182 skb = napi_alloc_skb(&bnapi->napi, len);
1186 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1189 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1190 len + NET_IP_ALIGN);
1192 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1199 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1200 u32 *raw_cons, void *cmp)
1202 struct rx_cmp *rxcmp = cmp;
1203 u32 tmp_raw_cons = *raw_cons;
1204 u8 cmp_type, agg_bufs = 0;
1206 cmp_type = RX_CMP_TYPE(rxcmp);
1208 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1209 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1211 RX_CMP_AGG_BUFS_SHIFT;
1212 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1213 struct rx_tpa_end_cmp *tpa_end = cmp;
1215 if (bp->flags & BNXT_FLAG_CHIP_P5)
1218 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1222 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1225 *raw_cons = tmp_raw_cons;
1229 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1231 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1235 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1237 schedule_delayed_work(&bp->fw_reset_task, delay);
1240 static void bnxt_queue_sp_work(struct bnxt *bp)
1243 queue_work(bnxt_pf_wq, &bp->sp_task);
1245 schedule_work(&bp->sp_task);
1248 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1250 if (!rxr->bnapi->in_reset) {
1251 rxr->bnapi->in_reset = true;
1252 if (bp->flags & BNXT_FLAG_CHIP_P5)
1253 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1255 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1256 bnxt_queue_sp_work(bp);
1258 rxr->rx_next_cons = 0xffff;
1261 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1263 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1264 u16 idx = agg_id & MAX_TPA_P5_MASK;
1266 if (test_bit(idx, map->agg_idx_bmap))
1267 idx = find_first_zero_bit(map->agg_idx_bmap,
1268 BNXT_AGG_IDX_BMAP_SIZE);
1269 __set_bit(idx, map->agg_idx_bmap);
1270 map->agg_id_tbl[agg_id] = idx;
1274 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1276 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1278 __clear_bit(idx, map->agg_idx_bmap);
1281 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1283 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1285 return map->agg_id_tbl[agg_id];
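/* TPA (hardware GRO/LRO) start: save the aggregation's buffer, RSS
 * hash, VLAN metadata and header offsets in the per-aggregation
 * tpa_info, swap a fresh buffer into the RX ring slot, and schedule a
 * ring reset if the completion arrives out of order or flags an error.
 */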
1288 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1289 struct rx_tpa_start_cmp *tpa_start,
1290 struct rx_tpa_start_cmp_ext *tpa_start1)
1292 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1293 struct bnxt_tpa_info *tpa_info;
1294 u16 cons, prod, agg_id;
1295 struct rx_bd *prod_bd;
1298 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1299 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1300 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1302 agg_id = TPA_START_AGG_ID(tpa_start);
1304 cons = tpa_start->rx_tpa_start_cmp_opaque;
1305 prod = rxr->rx_prod;
1306 cons_rx_buf = &rxr->rx_buf_ring[cons];
1307 prod_rx_buf = &rxr->rx_buf_ring[prod];
1308 tpa_info = &rxr->rx_tpa[agg_id];
1310 if (unlikely(cons != rxr->rx_next_cons ||
1311 TPA_START_ERROR(tpa_start))) {
1312 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1313 cons, rxr->rx_next_cons,
1314 TPA_START_ERROR_CODE(tpa_start1));
1315 bnxt_sched_reset(bp, rxr);
1318 /* Store cfa_code in tpa_info to use in tpa_end
1319 * completion processing.
1321 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1322 prod_rx_buf->data = tpa_info->data;
1323 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1325 mapping = tpa_info->mapping;
1326 prod_rx_buf->mapping = mapping;
1328 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1330 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1332 tpa_info->data = cons_rx_buf->data;
1333 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1334 cons_rx_buf->data = NULL;
1335 tpa_info->mapping = cons_rx_buf->mapping;
1338 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1339 RX_TPA_START_CMP_LEN_SHIFT;
1340 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1341 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1343 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1344 tpa_info->gso_type = SKB_GSO_TCPV4;
1345 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1346 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1347 tpa_info->gso_type = SKB_GSO_TCPV6;
1348 tpa_info->rss_hash =
1349 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1351 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1352 tpa_info->gso_type = 0;
1353 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1355 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1356 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1357 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1358 tpa_info->agg_count = 0;
1360 rxr->rx_prod = NEXT_RX(prod);
1361 cons = NEXT_RX(cons);
1362 rxr->rx_next_cons = NEXT_RX(cons);
1363 cons_rx_buf = &rxr->rx_buf_ring[cons];
1365 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1366 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1367 cons_rx_buf->data = NULL;
1370 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1373 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
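/* For UDP-tunneled TPA packets (e.g. VXLAN/Geneve), find the outer UDP
 * header and set SKB_GSO_UDP_TUNNEL or SKB_GSO_UDP_TUNNEL_CSUM so the
 * stack can resegment the inner packet correctly.
 */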
1377 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1379 struct udphdr *uh = NULL;
1381 if (ip_proto == htons(ETH_P_IP)) {
1382 struct iphdr *iph = (struct iphdr *)skb->data;
1384 if (iph->protocol == IPPROTO_UDP)
1385 uh = (struct udphdr *)(iph + 1);
1387 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1389 if (iph->nexthdr == IPPROTO_UDP)
1390 uh = (struct udphdr *)(iph + 1);
1394 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1396 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1401 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1402 int payload_off, int tcp_ts,
1403 struct sk_buff *skb)
1408 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1409 u32 hdr_info = tpa_info->hdr_info;
1410 bool loopback = false;
1412 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1413 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1414 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1416 /* If the packet is an internal loopback packet, the offsets will
1417 * have an extra 4 bytes.
1419 if (inner_mac_off == 4) {
1421 } else if (inner_mac_off > 4) {
1422 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1425 /* We only support inner IPv4/IPv6. If we don't see the
1426 * correct protocol ID, it must be a loopback packet where
1427 * the offsets are off by 4.
1429 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1433 /* internal loopback packet, subtract all offsets by 4 */
1439 nw_off = inner_ip_off - ETH_HLEN;
1440 skb_set_network_header(skb, nw_off);
1441 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1442 struct ipv6hdr *iph = ipv6_hdr(skb);
1444 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1445 len = skb->len - skb_transport_offset(skb);
1447 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1449 struct iphdr *iph = ip_hdr(skb);
1451 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1452 len = skb->len - skb_transport_offset(skb);
1454 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1457 if (inner_mac_off) { /* tunnel */
1458 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1461 bnxt_gro_tunnel(skb, proto);
1467 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1468 int payload_off, int tcp_ts,
1469 struct sk_buff *skb)
1472 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1473 u32 hdr_info = tpa_info->hdr_info;
1474 int iphdr_len, nw_off;
1476 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1477 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1478 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1480 nw_off = inner_ip_off - ETH_HLEN;
1481 skb_set_network_header(skb, nw_off);
1482 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1483 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1484 skb_set_transport_header(skb, nw_off + iphdr_len);
1486 if (inner_mac_off) { /* tunnel */
1487 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1490 bnxt_gro_tunnel(skb, proto);
1496 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1497 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1499 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1500 int payload_off, int tcp_ts,
1501 struct sk_buff *skb)
1505 int len, nw_off, tcp_opt_len = 0;
1510 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1513 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1515 skb_set_network_header(skb, nw_off);
1517 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1518 len = skb->len - skb_transport_offset(skb);
1520 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1521 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1522 struct ipv6hdr *iph;
1524 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1526 skb_set_network_header(skb, nw_off);
1527 iph = ipv6_hdr(skb);
1528 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1529 len = skb->len - skb_transport_offset(skb);
1531 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1533 dev_kfree_skb_any(skb);
1537 if (nw_off) /* tunnel */
1538 bnxt_gro_tunnel(skb, skb->protocol);
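/* Finish GRO coalescing for a TPA aggregation: set the segment count
 * and gso_size/gso_type from the TPA end completion, call the
 * chip-specific gro_func to rebuild the headers and pseudo checksum,
 * then complete the skb with tcp_gro_complete().
 */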
1543 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1544 struct bnxt_tpa_info *tpa_info,
1545 struct rx_tpa_end_cmp *tpa_end,
1546 struct rx_tpa_end_cmp_ext *tpa_end1,
1547 struct sk_buff *skb)
1553 segs = TPA_END_TPA_SEGS(tpa_end);
1557 NAPI_GRO_CB(skb)->count = segs;
1558 skb_shinfo(skb)->gso_size =
1559 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1560 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1561 if (bp->flags & BNXT_FLAG_CHIP_P5)
1562 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1564 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1565 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1567 tcp_gro_complete(skb);
1572 /* Given the cfa_code of a received packet, determine which
1573 * netdev (vf-rep or PF) the packet is destined for.
1575 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1577 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1579 /* if vf-rep dev is NULL, the packet must belong to the PF */
1580 return dev ? dev : bp->dev;
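/* TPA end: build the skb for a completed aggregation.  Small packets
 * are copied via bnxt_copy_skb(); otherwise a replacement buffer is
 * allocated and the old one wrapped with build_skb().  Aggregation
 * pages are attached with bnxt_rx_pages(), and hash, VLAN and checksum
 * status are applied before the optional GRO fixup in bnxt_gro_skb().
 */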
1583 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1584 struct bnxt_cp_ring_info *cpr,
1586 struct rx_tpa_end_cmp *tpa_end,
1587 struct rx_tpa_end_cmp_ext *tpa_end1,
1590 struct bnxt_napi *bnapi = cpr->bnapi;
1591 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1592 u8 *data_ptr, agg_bufs;
1594 struct bnxt_tpa_info *tpa_info;
1596 struct sk_buff *skb;
1597 u16 idx = 0, agg_id;
1601 if (unlikely(bnapi->in_reset)) {
1602 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1605 return ERR_PTR(-EBUSY);
1609 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1610 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1611 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1612 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1613 tpa_info = &rxr->rx_tpa[agg_id];
1614 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1615 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1616 agg_bufs, tpa_info->agg_count);
1617 agg_bufs = tpa_info->agg_count;
1619 tpa_info->agg_count = 0;
1620 *event |= BNXT_AGG_EVENT;
1621 bnxt_free_agg_idx(rxr, agg_id);
1623 gro = !!(bp->flags & BNXT_FLAG_GRO);
1625 agg_id = TPA_END_AGG_ID(tpa_end);
1626 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1627 tpa_info = &rxr->rx_tpa[agg_id];
1628 idx = RING_CMP(*raw_cons);
1630 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1631 return ERR_PTR(-EBUSY);
1633 *event |= BNXT_AGG_EVENT;
1634 idx = NEXT_CMP(idx);
1636 gro = !!TPA_END_GRO(tpa_end);
1638 data = tpa_info->data;
1639 data_ptr = tpa_info->data_ptr;
1641 len = tpa_info->len;
1642 mapping = tpa_info->mapping;
1644 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1645 bnxt_abort_tpa(cpr, idx, agg_bufs);
1646 if (agg_bufs > MAX_SKB_FRAGS)
1647 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1648 agg_bufs, (int)MAX_SKB_FRAGS);
1652 if (len <= bp->rx_copy_thresh) {
1653 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1655 bnxt_abort_tpa(cpr, idx, agg_bufs);
1656 cpr->sw_stats.rx.rx_oom_discards += 1;
1661 dma_addr_t new_mapping;
1663 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1665 bnxt_abort_tpa(cpr, idx, agg_bufs);
1666 cpr->sw_stats.rx.rx_oom_discards += 1;
1670 tpa_info->data = new_data;
1671 tpa_info->data_ptr = new_data + bp->rx_offset;
1672 tpa_info->mapping = new_mapping;
1674 skb = build_skb(data, 0);
1675 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1676 bp->rx_buf_use_size, bp->rx_dir,
1677 DMA_ATTR_WEAK_ORDERING);
1681 bnxt_abort_tpa(cpr, idx, agg_bufs);
1682 cpr->sw_stats.rx.rx_oom_discards += 1;
1685 skb_reserve(skb, bp->rx_offset);
1690 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1692 /* Page reuse already handled by bnxt_rx_pages(). */
1693 cpr->sw_stats.rx.rx_oom_discards += 1;
1699 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1701 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1702 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1704 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1705 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1706 __be16 vlan_proto = htons(tpa_info->metadata >>
1707 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1708 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1710 if (eth_type_vlan(vlan_proto)) {
1711 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1718 skb_checksum_none_assert(skb);
1719 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1720 skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1726 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1731 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1732 struct rx_agg_cmp *rx_agg)
1734 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1735 struct bnxt_tpa_info *tpa_info;
1737 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1738 tpa_info = &rxr->rx_tpa[agg_id];
1739 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1740 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1743 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1744 struct sk_buff *skb)
1746 if (skb->dev != bp->dev) {
1747 /* this packet belongs to a vf-rep */
1748 bnxt_vf_rep_rx(bp, skb);
1751 skb_record_rx_queue(skb, bnapi->index);
1752 napi_gro_receive(&bnapi->napi, skb);
1755 /* returns the following:
1756 * 1 - 1 packet successfully received
1757 * 0 - successful TPA_START, packet not completed yet
1758 * -EBUSY - completion ring does not have all the agg buffers yet
1759 * -ENOMEM - packet aborted due to out of memory
1760 * -EIO - packet aborted due to hw error indicated in BD
1762 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1763 u32 *raw_cons, u8 *event)
1765 struct bnxt_napi *bnapi = cpr->bnapi;
1766 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1767 struct net_device *dev = bp->dev;
1768 struct rx_cmp *rxcmp;
1769 struct rx_cmp_ext *rxcmp1;
1770 u32 tmp_raw_cons = *raw_cons;
1771 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1772 struct bnxt_sw_rx_bd *rx_buf;
1774 u8 *data_ptr, agg_bufs, cmp_type;
1775 dma_addr_t dma_addr;
1776 struct sk_buff *skb;
1781 rxcmp = (struct rx_cmp *)
1782 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1784 cmp_type = RX_CMP_TYPE(rxcmp);
1786 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1787 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1788 goto next_rx_no_prod_no_len;
1791 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1792 cp_cons = RING_CMP(tmp_raw_cons);
1793 rxcmp1 = (struct rx_cmp_ext *)
1794 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1796 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1799 /* The validity test of the entry must be done first, before
1800 * reading any further.
1803 prod = rxr->rx_prod;
1805 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1806 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1807 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1809 *event |= BNXT_RX_EVENT;
1810 goto next_rx_no_prod_no_len;
1812 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1813 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1814 (struct rx_tpa_end_cmp *)rxcmp,
1815 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1822 bnxt_deliver_skb(bp, bnapi, skb);
1825 *event |= BNXT_RX_EVENT;
1826 goto next_rx_no_prod_no_len;
1829 cons = rxcmp->rx_cmp_opaque;
1830 if (unlikely(cons != rxr->rx_next_cons)) {
1831 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1833 /* 0xffff is forced error, don't print it */
1834 if (rxr->rx_next_cons != 0xffff)
1835 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1836 cons, rxr->rx_next_cons);
1837 bnxt_sched_reset(bp, rxr);
1840 goto next_rx_no_prod_no_len;
1842 rx_buf = &rxr->rx_buf_ring[cons];
1843 data = rx_buf->data;
1844 data_ptr = rx_buf->data_ptr;
1847 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1848 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1851 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1854 cp_cons = NEXT_CMP(cp_cons);
1855 *event |= BNXT_AGG_EVENT;
1857 *event |= BNXT_RX_EVENT;
1859 rx_buf->data = NULL;
1860 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1861 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1863 bnxt_reuse_rx_data(rxr, cons, data);
1865 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1869 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1870 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1871 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1872 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1873 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1875 bnxt_sched_reset(bp, rxr);
1878 goto next_rx_no_len;
1881 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1882 len = flags >> RX_CMP_LEN_SHIFT;
1883 dma_addr = rx_buf->mapping;
1885 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1890 if (len <= bp->rx_copy_thresh) {
1891 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1892 bnxt_reuse_rx_data(rxr, cons, data);
1895 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1897 cpr->sw_stats.rx.rx_oom_discards += 1;
1904 if (rx_buf->data_ptr == data_ptr)
1905 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1908 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1911 cpr->sw_stats.rx.rx_oom_discards += 1;
1918 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1920 cpr->sw_stats.rx.rx_oom_discards += 1;
1926 if (RX_CMP_HASH_VALID(rxcmp)) {
1927 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1928 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1930 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1931 if (hash_type != 1 && hash_type != 3)
1932 type = PKT_HASH_TYPE_L3;
1933 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1936 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1937 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1939 if ((rxcmp1->rx_cmp_flags2 &
1940 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1941 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1942 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1943 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1944 __be16 vlan_proto = htons(meta_data >>
1945 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1947 if (eth_type_vlan(vlan_proto)) {
1948 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1955 skb_checksum_none_assert(skb);
1956 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1957 if (dev->features & NETIF_F_RXCSUM) {
1958 skb->ip_summed = CHECKSUM_UNNECESSARY;
1959 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1962 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1963 if (dev->features & NETIF_F_RXCSUM)
1964 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1968 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1969 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1970 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1971 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1974 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1975 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1977 spin_lock_bh(&ptp->ptp_lock);
1978 ns = timecounter_cyc2time(&ptp->tc, ts);
1979 spin_unlock_bh(&ptp->ptp_lock);
1980 memset(skb_hwtstamps(skb), 0,
1981 sizeof(*skb_hwtstamps(skb)));
1982 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1986 bnxt_deliver_skb(bp, bnapi, skb);
1990 cpr->rx_packets += 1;
1991 cpr->rx_bytes += len;
1994 rxr->rx_prod = NEXT_RX(prod);
1995 rxr->rx_next_cons = NEXT_RX(cons);
1997 next_rx_no_prod_no_len:
1998 *raw_cons = tmp_raw_cons;
2003 /* In netpoll mode, if we are using a combined completion ring, we need to
2004 * discard the rx packets and recycle the buffers.
2006 static int bnxt_force_rx_discard(struct bnxt *bp,
2007 struct bnxt_cp_ring_info *cpr,
2008 u32 *raw_cons, u8 *event)
2010 u32 tmp_raw_cons = *raw_cons;
2011 struct rx_cmp_ext *rxcmp1;
2012 struct rx_cmp *rxcmp;
2017 cp_cons = RING_CMP(tmp_raw_cons);
2018 rxcmp = (struct rx_cmp *)
2019 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2021 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2022 cp_cons = RING_CMP(tmp_raw_cons);
2023 rxcmp1 = (struct rx_cmp_ext *)
2024 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2026 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2029 /* The validity test of the entry must be done first, before
2030 * reading any further.
2033 cmp_type = RX_CMP_TYPE(rxcmp);
2034 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2035 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2036 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2037 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2038 struct rx_tpa_end_cmp_ext *tpa_end1;
2040 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2041 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2042 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2044 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2045 if (rc && rc != -EBUSY)
2046 cpr->sw_stats.rx.rx_netpoll_discards += 1;
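/* Read a firmware health/status register.  The register descriptor
 * encodes the access method (PCI config space, GRC window, BAR0 or
 * BAR1) along with the offset; the reset-in-progress register is
 * additionally masked with fw_reset_inprog_reg_mask.
 */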
2050 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2052 struct bnxt_fw_health *fw_health = bp->fw_health;
2053 u32 reg = fw_health->regs[reg_idx];
2054 u32 reg_type, reg_off, val = 0;
2056 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2057 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2059 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2060 pci_read_config_dword(bp->pdev, reg_off, &val);
2062 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2063 reg_off = fw_health->mapped_regs[reg_idx];
2065 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2066 val = readl(bp->bar0 + reg_off);
2068 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2069 val = readl(bp->bar1 + reg_off);
2072 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2073 val &= fw_health->fw_reset_inprog_reg_mask;
2077 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2081 for (i = 0; i < bp->rx_nr_rings; i++) {
2082 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2083 struct bnxt_ring_grp_info *grp_info;
2085 grp_info = &bp->grp_info[grp_idx];
2086 if (grp_info->agg_fw_ring_id == ring_id)
2089 return INVALID_HW_RING_ID;
2092 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2094 switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2095 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2096 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2097 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2100 netdev_err(bp->dev, "FW reported unknown error type\n");
2105 #define BNXT_GET_EVENT_PORT(data) \
2107 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2109 #define BNXT_EVENT_RING_TYPE(data2) \
2111 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2113 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2114 (BNXT_EVENT_RING_TYPE(data2) == \
2115 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
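/* Dispatch a firmware asynchronous event completion: link and PHY
 * configuration changes set slow-path event bits for the sp_task,
 * reset notifications record the firmware's min/max wait times, and
 * error recovery events refresh the heartbeat and reset counters used
 * by the health monitor.
 */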
2117 static int bnxt_async_event_process(struct bnxt *bp,
2118 struct hwrm_async_event_cmpl *cmpl)
2120 u16 event_id = le16_to_cpu(cmpl->event_id);
2121 u32 data1 = le32_to_cpu(cmpl->event_data1);
2122 u32 data2 = le32_to_cpu(cmpl->event_data2);
2124 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2126 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2127 struct bnxt_link_info *link_info = &bp->link_info;
2130 goto async_event_process_exit;
2132 /* print unsupported speed warning in forced speed mode only */
2133 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2134 (data1 & 0x20000)) {
2135 u16 fw_speed = link_info->force_link_speed;
2136 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2138 if (speed != SPEED_UNKNOWN)
2139 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2142 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2145 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2146 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2147 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2149 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2150 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2152 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2153 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2155 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2156 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2161 if (bp->pf.port_id != port_id)
2164 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2167 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2169 goto async_event_process_exit;
2170 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2172 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2173 char *fatal_str = "non-fatal";
2176 goto async_event_process_exit;
2178 bp->fw_reset_timestamp = jiffies;
2179 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2180 if (!bp->fw_reset_min_dsecs)
2181 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2182 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2183 if (!bp->fw_reset_max_dsecs)
2184 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2185 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2186 fatal_str = "fatal";
2187 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2189 netif_warn(bp, hw, bp->dev,
2190 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2191 fatal_str, data1, data2,
2192 bp->fw_reset_min_dsecs * 100,
2193 bp->fw_reset_max_dsecs * 100);
2194 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2197 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2198 struct bnxt_fw_health *fw_health = bp->fw_health;
2201 goto async_event_process_exit;
2203 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2204 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2205 if (!fw_health->enabled) {
2206 netif_info(bp, drv, bp->dev,
2207 "Error recovery info: error recovery[0]\n");
2210 fw_health->tmr_multiplier =
2211 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2212 bp->current_interval * 10);
2213 fw_health->tmr_counter = fw_health->tmr_multiplier;
2214 fw_health->last_fw_heartbeat =
2215 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2216 fw_health->last_fw_reset_cnt =
2217 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2218 netif_info(bp, drv, bp->dev,
2219 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2220 fw_health->master, fw_health->last_fw_reset_cnt,
2221 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2222 goto async_event_process_exit;
2224 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2225 netif_notice(bp, hw, bp->dev,
2226 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2228 goto async_event_process_exit;
2229 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2230 struct bnxt_rx_ring_info *rxr;
2233 if (bp->flags & BNXT_FLAG_CHIP_P5)
2234 goto async_event_process_exit;
2236 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2237 BNXT_EVENT_RING_TYPE(data2), data1);
2238 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2239 goto async_event_process_exit;
2241 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2242 if (grp_idx == INVALID_HW_RING_ID) {
2243 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2245 goto async_event_process_exit;
2247 rxr = bp->bnapi[grp_idx]->rx_ring;
2248 bnxt_sched_reset(bp, rxr);
2249 goto async_event_process_exit;
2251 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2252 struct bnxt_fw_health *fw_health = bp->fw_health;
2254 netif_notice(bp, hw, bp->dev,
2255 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2258 fw_health->echo_req_data1 = data1;
2259 fw_health->echo_req_data2 = data2;
2260 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2263 goto async_event_process_exit;
2265 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2266 bnxt_ptp_pps_event(bp, data1, data2);
2267 goto async_event_process_exit;
2269 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2270 bnxt_event_error_report(bp, data1, data2);
2271 goto async_event_process_exit;
2273 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2274 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2276 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2277 goto async_event_process_exit;
2280 goto async_event_process_exit;
2282 bnxt_queue_sp_work(bp);
2283 async_event_process_exit:
2284 bnxt_ulp_async_events(bp, cmpl);
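/* Handle HWRM-related completions seen on the completion ring: command
 * DONE notifications, requests forwarded from VFs, and async events.
 */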
2288 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2290 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2291 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2292 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2293 (struct hwrm_fwd_req_cmpl *)txcmp;
2295 switch (cmpl_type) {
2296 case CMPL_BASE_TYPE_HWRM_DONE:
2297 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2298 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2301 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2302 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2304 if ((vf_id < bp->pf.first_vf_id) ||
2305 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2306 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2311 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2312 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2313 bnxt_queue_sp_work(bp);
2316 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2317 bnxt_async_event_process(bp,
2318 (struct hwrm_async_event_cmpl *)txcmp);
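/* MSI-X interrupt handler: prefetch the next completion descriptor and
 * hand the rest of the work to NAPI.
 */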
2328 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2330 struct bnxt_napi *bnapi = dev_instance;
2331 struct bnxt *bp = bnapi->bp;
2332 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2333 u32 cons = RING_CMP(cpr->cp_raw_cons);
2336 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2337 napi_schedule(&bnapi->napi);
2341 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2343 u32 raw_cons = cpr->cp_raw_cons;
2344 u16 cons = RING_CMP(raw_cons);
2345 struct tx_cmp *txcmp;
2347 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2349 return TX_CMP_VALID(txcmp, raw_cons);
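/* Legacy INTx handler.  The interrupt line may be shared, so verify that
 * the chip is actually asserting this ring's interrupt and that interrupts
 * are not masked (intr_sem) before scheduling NAPI.
 */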
2352 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2354 struct bnxt_napi *bnapi = dev_instance;
2355 struct bnxt *bp = bnapi->bp;
2356 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2357 u32 cons = RING_CMP(cpr->cp_raw_cons);
2360 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2362 if (!bnxt_has_work(bp, cpr)) {
2363 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2364 /* return if erroneous interrupt */
2365 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2369 /* disable ring IRQ */
2370 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2372 /* Return here if interrupt is shared and is disabled. */
2373 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2376 napi_schedule(&bnapi->napi);
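/* Core completion-ring poll loop shared by all NAPI variants.  Walks the
 * ring up to the NAPI budget, dispatching TX, RX and HWRM completions,
 * and records whether more work remains in cpr->has_more_work.
 */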
2380 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2383 struct bnxt_napi *bnapi = cpr->bnapi;
2384 u32 raw_cons = cpr->cp_raw_cons;
2389 struct tx_cmp *txcmp;
2391 cpr->has_more_work = 0;
2392 cpr->had_work_done = 1;
2396 cons = RING_CMP(raw_cons);
2397 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2399 if (!TX_CMP_VALID(txcmp, raw_cons))
2402 /* The valid test of the entry must be done first before
2403 * reading any further. */
2406 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2408 /* return full budget so NAPI will complete. */
2409 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2411 raw_cons = NEXT_RAW_CMP(raw_cons);
2413 cpr->has_more_work = 1;
2416 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2418 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2420 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2422 if (likely(rc >= 0))
2424 /* Increment rx_pkts when rc is -ENOMEM to count towards
2425 * the NAPI budget. Otherwise, we may potentially loop
2426 * here forever if we consistently cannot allocate buffers. */
2429 else if (rc == -ENOMEM && budget)
2431 else if (rc == -EBUSY) /* partial completion */
2433 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2434 CMPL_BASE_TYPE_HWRM_DONE) ||
2435 (TX_CMP_TYPE(txcmp) ==
2436 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2437 (TX_CMP_TYPE(txcmp) ==
2438 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2439 bnxt_hwrm_handler(bp, txcmp);
2441 raw_cons = NEXT_RAW_CMP(raw_cons);
2443 if (rx_pkts && rx_pkts == budget) {
2444 cpr->has_more_work = 1;
2449 if (event & BNXT_REDIRECT_EVENT)
2452 if (event & BNXT_TX_EVENT) {
2453 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2454 u16 prod = txr->tx_prod;
2456 /* Sync BD data before updating doorbell */
2459 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2462 cpr->cp_raw_cons = raw_cons;
2463 bnapi->tx_pkts += tx_pkts;
2464 bnapi->events |= event;
2468 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2470 if (bnapi->tx_pkts) {
2471 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2475 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2476 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2478 if (bnapi->events & BNXT_AGG_EVENT)
2479 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2480 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2485 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2488 struct bnxt_napi *bnapi = cpr->bnapi;
2491 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2493 /* ACK completion ring before freeing tx ring and producing new
2494 * buffers in rx/agg rings to prevent overflowing the completion ring. */
2497 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2499 __bnxt_poll_work_done(bp, bnapi);
2503 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2505 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2506 struct bnxt *bp = bnapi->bp;
2507 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2508 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2509 struct tx_cmp *txcmp;
2510 struct rx_cmp_ext *rxcmp1;
2511 u32 cp_cons, tmp_raw_cons;
2512 u32 raw_cons = cpr->cp_raw_cons;
2519 cp_cons = RING_CMP(raw_cons);
2520 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2522 if (!TX_CMP_VALID(txcmp, raw_cons))
2525 /* The valid test of the entry must be done first before
2526 * reading any further. */
2529 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2530 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2531 cp_cons = RING_CMP(tmp_raw_cons);
2532 rxcmp1 = (struct rx_cmp_ext *)
2533 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2535 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2538 /* force an error to recycle the buffer */
2539 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2540 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2542 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2543 if (likely(rc == -EIO) && budget)
2545 else if (rc == -EBUSY) /* partial completion */
2547 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2548 CMPL_BASE_TYPE_HWRM_DONE)) {
2549 bnxt_hwrm_handler(bp, txcmp);
2552 "Invalid completion received on special ring\n");
2554 raw_cons = NEXT_RAW_CMP(raw_cons);
2556 if (rx_pkts == budget)
2560 cpr->cp_raw_cons = raw_cons;
2561 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2562 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2564 if (event & BNXT_AGG_EVENT)
2565 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2567 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2568 napi_complete_done(napi, rx_pkts);
2569 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
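/* NAPI handler for the default completion ring.  Skips processing while a
 * fatal firmware error is pending, polls completions via bnxt_poll_work(),
 * re-arms the completion-ring doorbell when done, and feeds the DIM
 * (dynamic interrupt moderation) sampler if enabled.
 */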
2574 static int bnxt_poll(struct napi_struct *napi, int budget)
2576 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2577 struct bnxt *bp = bnapi->bp;
2578 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2581 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2582 napi_complete(napi);
2586 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2588 if (work_done >= budget) {
2590 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2594 if (!bnxt_has_work(bp, cpr)) {
2595 if (napi_complete_done(napi, work_done))
2596 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2600 if (bp->flags & BNXT_FLAG_DIM) {
2601 struct dim_sample dim_sample = {};
2603 dim_update_sample(cpr->event_ctr,
2607 net_dim(&cpr->dim, dim_sample);
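/* On P5 chips each NAPI instance owns up to two sub completion rings (RX
 * and TX) hanging off the notification queue; poll both of them.
 */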
2612 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2614 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2615 int i, work_done = 0;
2617 for (i = 0; i < 2; i++) {
2618 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2621 work_done += __bnxt_poll_work(bp, cpr2,
2622 budget - work_done);
2623 cpr->has_more_work |= cpr2->has_more_work;
2629 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2632 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2635 for (i = 0; i < 2; i++) {
2636 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2637 struct bnxt_db_info *db;
2639 if (cpr2 && cpr2->had_work_done) {
2641 writeq(db->db_key64 | dbr_type |
2642 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2643 cpr2->had_work_done = 0;
2646 __bnxt_poll_work_done(bp, bnapi);
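/* NAPI handler for P5 chips.  The hardware posts NQ (notification queue)
 * entries that point at the RX/TX completion rings; service those rings,
 * then acknowledge and re-arm the NQ doorbell.
 */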
2649 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2651 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2652 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2653 u32 raw_cons = cpr->cp_raw_cons;
2654 struct bnxt *bp = bnapi->bp;
2655 struct nqe_cn *nqcmp;
2659 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2660 napi_complete(napi);
2663 if (cpr->has_more_work) {
2664 cpr->has_more_work = 0;
2665 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2668 cons = RING_CMP(raw_cons);
2669 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2671 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2672 if (cpr->has_more_work)
2675 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2676 cpr->cp_raw_cons = raw_cons;
2677 if (napi_complete_done(napi, work_done))
2678 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2683 /* The valid test of the entry must be done first before
2684 * reading any further. */
2688 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2689 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2690 struct bnxt_cp_ring_info *cpr2;
2692 cpr2 = cpr->cp_ring_arr[idx];
2693 work_done += __bnxt_poll_work(bp, cpr2,
2694 budget - work_done);
2695 cpr->has_more_work |= cpr2->has_more_work;
2697 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2699 raw_cons = NEXT_RAW_CMP(raw_cons);
2701 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2702 if (raw_cons != cpr->cp_raw_cons) {
2703 cpr->cp_raw_cons = raw_cons;
2704 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2709 static void bnxt_free_tx_skbs(struct bnxt *bp)
2712 struct pci_dev *pdev = bp->pdev;
2717 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2718 for (i = 0; i < bp->tx_nr_rings; i++) {
2719 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2722 for (j = 0; j < max_idx;) {
2723 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2724 struct sk_buff *skb;
2727 if (i < bp->tx_nr_rings_xdp &&
2728 tx_buf->action == XDP_REDIRECT) {
2729 dma_unmap_single(&pdev->dev,
2730 dma_unmap_addr(tx_buf, mapping),
2731 dma_unmap_len(tx_buf, len),
2733 xdp_return_frame(tx_buf->xdpf);
2735 tx_buf->xdpf = NULL;
2748 if (tx_buf->is_push) {
2754 dma_unmap_single(&pdev->dev,
2755 dma_unmap_addr(tx_buf, mapping),
2759 last = tx_buf->nr_frags;
2761 for (k = 0; k < last; k++, j++) {
2762 int ring_idx = j & bp->tx_ring_mask;
2763 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2765 tx_buf = &txr->tx_buf_ring[ring_idx];
2768 dma_unmap_addr(tx_buf, mapping),
2769 skb_frag_size(frag), DMA_TO_DEVICE);
2773 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
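/* Unmap and free all buffers attached to one RX ring: the TPA buffers,
 * the regular RX buffer ring, and the aggregation ring pages.
 */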
2777 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2779 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2780 struct pci_dev *pdev = bp->pdev;
2781 struct bnxt_tpa_idx_map *map;
2782 int i, max_idx, max_agg_idx;
2784 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2785 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2787 goto skip_rx_tpa_free;
2789 for (i = 0; i < bp->max_tpa; i++) {
2790 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2791 u8 *data = tpa_info->data;
2796 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2797 bp->rx_buf_use_size, bp->rx_dir,
2798 DMA_ATTR_WEAK_ORDERING);
2800 tpa_info->data = NULL;
2806 for (i = 0; i < max_idx; i++) {
2807 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2808 dma_addr_t mapping = rx_buf->mapping;
2809 void *data = rx_buf->data;
2814 rx_buf->data = NULL;
2815 if (BNXT_RX_PAGE_MODE(bp)) {
2816 mapping -= bp->rx_dma_offset;
2817 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2819 DMA_ATTR_WEAK_ORDERING);
2820 page_pool_recycle_direct(rxr->page_pool, data);
2822 dma_unmap_single_attrs(&pdev->dev, mapping,
2823 bp->rx_buf_use_size, bp->rx_dir,
2824 DMA_ATTR_WEAK_ORDERING);
2828 for (i = 0; i < max_agg_idx; i++) {
2829 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2830 struct page *page = rx_agg_buf->page;
2835 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2836 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2837 DMA_ATTR_WEAK_ORDERING);
2839 rx_agg_buf->page = NULL;
2840 __clear_bit(i, rxr->rx_agg_bmap);
2845 __free_page(rxr->rx_page);
2846 rxr->rx_page = NULL;
2848 map = rxr->rx_tpa_idx_map;
2850 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2853 static void bnxt_free_rx_skbs(struct bnxt *bp)
2860 for (i = 0; i < bp->rx_nr_rings; i++)
2861 bnxt_free_one_rx_ring_skbs(bp, i);
2864 static void bnxt_free_skbs(struct bnxt *bp)
2866 bnxt_free_tx_skbs(bp);
2867 bnxt_free_rx_skbs(bp);
2870 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2872 u8 init_val = mem_init->init_val;
2873 u16 offset = mem_init->offset;
2879 if (offset == BNXT_MEM_INVALID_OFFSET) {
2880 memset(p, init_val, len);
2883 for (i = 0; i < len; i += mem_init->size)
2884 *(p2 + i + offset) = init_val;
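/* Free the DMA-coherent pages backing a ring, its optional page table,
 * and the vmalloc'ed software ring, if any.
 */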
2887 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2889 struct pci_dev *pdev = bp->pdev;
2892 for (i = 0; i < rmem->nr_pages; i++) {
2893 if (!rmem->pg_arr[i])
2896 dma_free_coherent(&pdev->dev, rmem->page_size,
2897 rmem->pg_arr[i], rmem->dma_arr[i]);
2899 rmem->pg_arr[i] = NULL;
2902 size_t pg_tbl_size = rmem->nr_pages * 8;
2904 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2905 pg_tbl_size = rmem->page_size;
2906 dma_free_coherent(&pdev->dev, pg_tbl_size,
2907 rmem->pg_tbl, rmem->pg_tbl_map);
2908 rmem->pg_tbl = NULL;
2910 if (rmem->vmem_size && *rmem->vmem) {
2916 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2918 struct pci_dev *pdev = bp->pdev;
2922 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2923 valid_bit = PTU_PTE_VALID;
2924 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2925 size_t pg_tbl_size = rmem->nr_pages * 8;
2927 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2928 pg_tbl_size = rmem->page_size;
2929 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2936 for (i = 0; i < rmem->nr_pages; i++) {
2937 u64 extra_bits = valid_bit;
2939 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2943 if (!rmem->pg_arr[i])
2947 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2949 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2950 if (i == rmem->nr_pages - 2 &&
2951 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2952 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2953 else if (i == rmem->nr_pages - 1 &&
2954 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2955 extra_bits |= PTU_PTE_LAST;
2957 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2961 if (rmem->vmem_size) {
2962 *rmem->vmem = vzalloc(rmem->vmem_size);
2969 static void bnxt_free_tpa_info(struct bnxt *bp)
2973 for (i = 0; i < bp->rx_nr_rings; i++) {
2974 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2976 kfree(rxr->rx_tpa_idx_map);
2977 rxr->rx_tpa_idx_map = NULL;
2979 kfree(rxr->rx_tpa[0].agg_arr);
2980 rxr->rx_tpa[0].agg_arr = NULL;
2987 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2989 int i, j, total_aggs = 0;
2991 bp->max_tpa = MAX_TPA;
2992 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2993 if (!bp->max_tpa_v2)
2995 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2996 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2999 for (i = 0; i < bp->rx_nr_rings; i++) {
3000 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3001 struct rx_agg_cmp *agg;
3003 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3008 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3010 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3011 rxr->rx_tpa[0].agg_arr = agg;
3014 for (j = 1; j < bp->max_tpa; j++)
3015 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3016 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3018 if (!rxr->rx_tpa_idx_map)
3024 static void bnxt_free_rx_rings(struct bnxt *bp)
3031 bnxt_free_tpa_info(bp);
3032 for (i = 0; i < bp->rx_nr_rings; i++) {
3033 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3034 struct bnxt_ring_struct *ring;
3037 bpf_prog_put(rxr->xdp_prog);
3039 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3040 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3042 page_pool_destroy(rxr->page_pool);
3043 rxr->page_pool = NULL;
3045 kfree(rxr->rx_agg_bmap);
3046 rxr->rx_agg_bmap = NULL;
3048 ring = &rxr->rx_ring_struct;
3049 bnxt_free_ring(bp, &ring->ring_mem);
3051 ring = &rxr->rx_agg_ring_struct;
3052 bnxt_free_ring(bp, &ring->ring_mem);
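/* Create a page_pool for one RX ring, sized to the RX ring and bound to
 * the device's NUMA node so recycled pages stay DMA-friendly.
 */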
3056 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3057 struct bnxt_rx_ring_info *rxr)
3059 struct page_pool_params pp = { 0 };
3061 pp.pool_size = bp->rx_ring_size;
3062 pp.nid = dev_to_node(&bp->pdev->dev);
3063 pp.dev = &bp->pdev->dev;
3064 pp.dma_dir = DMA_BIDIRECTIONAL;
3066 rxr->page_pool = page_pool_create(&pp);
3067 if (IS_ERR(rxr->page_pool)) {
3068 int err = PTR_ERR(rxr->page_pool);
3070 rxr->page_pool = NULL;
3076 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3078 int i, rc = 0, agg_rings = 0;
3083 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3086 for (i = 0; i < bp->rx_nr_rings; i++) {
3087 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3088 struct bnxt_ring_struct *ring;
3090 ring = &rxr->rx_ring_struct;
3092 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3096 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3100 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3104 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3108 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3116 ring = &rxr->rx_agg_ring_struct;
3117 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3122 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3123 mem_size = rxr->rx_agg_bmap_size / 8;
3124 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3125 if (!rxr->rx_agg_bmap)
3129 if (bp->flags & BNXT_FLAG_TPA)
3130 rc = bnxt_alloc_tpa_info(bp);
3134 static void bnxt_free_tx_rings(struct bnxt *bp)
3137 struct pci_dev *pdev = bp->pdev;
3142 for (i = 0; i < bp->tx_nr_rings; i++) {
3143 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3144 struct bnxt_ring_struct *ring;
3147 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3148 txr->tx_push, txr->tx_push_mapping);
3149 txr->tx_push = NULL;
3152 ring = &txr->tx_ring_struct;
3154 bnxt_free_ring(bp, &ring->ring_mem);
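/* Allocate the TX rings and, when TX push is enabled, one pre-allocated
 * DMA buffer per ring that backs the small-packet push mode (push is
 * disabled if the computed push size exceeds 256 bytes).
 */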
3158 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3161 struct pci_dev *pdev = bp->pdev;
3163 bp->tx_push_size = 0;
3164 if (bp->tx_push_thresh) {
3167 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3168 bp->tx_push_thresh);
3170 if (push_size > 256) {
3172 bp->tx_push_thresh = 0;
3175 bp->tx_push_size = push_size;
3178 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3179 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3180 struct bnxt_ring_struct *ring;
3183 ring = &txr->tx_ring_struct;
3185 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3189 ring->grp_idx = txr->bnapi->index;
3190 if (bp->tx_push_size) {
3193 /* One pre-allocated DMA buffer to backup queue full condition */
3196 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3198 &txr->tx_push_mapping,
3204 mapping = txr->tx_push_mapping +
3205 sizeof(struct tx_push_bd);
3206 txr->data_mapping = cpu_to_le64(mapping);
3208 qidx = bp->tc_to_qidx[j];
3209 ring->queue_id = bp->q_info[qidx].queue_id;
3210 if (i < bp->tx_nr_rings_xdp)
3212 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3218 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3220 kfree(cpr->cp_desc_ring);
3221 cpr->cp_desc_ring = NULL;
3222 kfree(cpr->cp_desc_mapping);
3223 cpr->cp_desc_mapping = NULL;
3226 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3228 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3229 if (!cpr->cp_desc_ring)
3231 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3233 if (!cpr->cp_desc_mapping)
3238 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3244 for (i = 0; i < bp->cp_nr_rings; i++) {
3245 struct bnxt_napi *bnapi = bp->bnapi[i];
3249 bnxt_free_cp_arrays(&bnapi->cp_ring);
3253 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3255 int i, n = bp->cp_nr_pages;
3257 for (i = 0; i < bp->cp_nr_rings; i++) {
3258 struct bnxt_napi *bnapi = bp->bnapi[i];
3263 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3270 static void bnxt_free_cp_rings(struct bnxt *bp)
3277 for (i = 0; i < bp->cp_nr_rings; i++) {
3278 struct bnxt_napi *bnapi = bp->bnapi[i];
3279 struct bnxt_cp_ring_info *cpr;
3280 struct bnxt_ring_struct *ring;
3286 cpr = &bnapi->cp_ring;
3287 ring = &cpr->cp_ring_struct;
3289 bnxt_free_ring(bp, &ring->ring_mem);
3291 for (j = 0; j < 2; j++) {
3292 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3295 ring = &cpr2->cp_ring_struct;
3296 bnxt_free_ring(bp, &ring->ring_mem);
3297 bnxt_free_cp_arrays(cpr2);
3299 cpr->cp_ring_arr[j] = NULL;
3305 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3307 struct bnxt_ring_mem_info *rmem;
3308 struct bnxt_ring_struct *ring;
3309 struct bnxt_cp_ring_info *cpr;
3312 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3316 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3318 bnxt_free_cp_arrays(cpr);
3322 ring = &cpr->cp_ring_struct;
3323 rmem = &ring->ring_mem;
3324 rmem->nr_pages = bp->cp_nr_pages;
3325 rmem->page_size = HW_CMPD_RING_SIZE;
3326 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3327 rmem->dma_arr = cpr->cp_desc_mapping;
3328 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3329 rc = bnxt_alloc_ring(bp, rmem);
3331 bnxt_free_ring(bp, rmem);
3332 bnxt_free_cp_arrays(cpr);
3339 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3341 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3342 int i, rc, ulp_base_vec, ulp_msix;
3344 ulp_msix = bnxt_get_ulp_msix_num(bp);
3345 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3346 for (i = 0; i < bp->cp_nr_rings; i++) {
3347 struct bnxt_napi *bnapi = bp->bnapi[i];
3348 struct bnxt_cp_ring_info *cpr;
3349 struct bnxt_ring_struct *ring;
3354 cpr = &bnapi->cp_ring;
3356 ring = &cpr->cp_ring_struct;
3358 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3362 if (ulp_msix && i >= ulp_base_vec)
3363 ring->map_idx = i + ulp_msix;
3367 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3370 if (i < bp->rx_nr_rings) {
3371 struct bnxt_cp_ring_info *cpr2 =
3372 bnxt_alloc_cp_sub_ring(bp);
3374 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3377 cpr2->bnapi = bnapi;
3379 if ((sh && i < bp->tx_nr_rings) ||
3380 (!sh && i >= bp->rx_nr_rings)) {
3381 struct bnxt_cp_ring_info *cpr2 =
3382 bnxt_alloc_cp_sub_ring(bp);
3384 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3387 cpr2->bnapi = bnapi;
3393 static void bnxt_init_ring_struct(struct bnxt *bp)
3397 for (i = 0; i < bp->cp_nr_rings; i++) {
3398 struct bnxt_napi *bnapi = bp->bnapi[i];
3399 struct bnxt_ring_mem_info *rmem;
3400 struct bnxt_cp_ring_info *cpr;
3401 struct bnxt_rx_ring_info *rxr;
3402 struct bnxt_tx_ring_info *txr;
3403 struct bnxt_ring_struct *ring;
3408 cpr = &bnapi->cp_ring;
3409 ring = &cpr->cp_ring_struct;
3410 rmem = &ring->ring_mem;
3411 rmem->nr_pages = bp->cp_nr_pages;
3412 rmem->page_size = HW_CMPD_RING_SIZE;
3413 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3414 rmem->dma_arr = cpr->cp_desc_mapping;
3415 rmem->vmem_size = 0;
3417 rxr = bnapi->rx_ring;
3421 ring = &rxr->rx_ring_struct;
3422 rmem = &ring->ring_mem;
3423 rmem->nr_pages = bp->rx_nr_pages;
3424 rmem->page_size = HW_RXBD_RING_SIZE;
3425 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3426 rmem->dma_arr = rxr->rx_desc_mapping;
3427 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3428 rmem->vmem = (void **)&rxr->rx_buf_ring;
3430 ring = &rxr->rx_agg_ring_struct;
3431 rmem = &ring->ring_mem;
3432 rmem->nr_pages = bp->rx_agg_nr_pages;
3433 rmem->page_size = HW_RXBD_RING_SIZE;
3434 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3435 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3436 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3437 rmem->vmem = (void **)&rxr->rx_agg_ring;
3440 txr = bnapi->tx_ring;
3444 ring = &txr->tx_ring_struct;
3445 rmem = &ring->ring_mem;
3446 rmem->nr_pages = bp->tx_nr_pages;
3447 rmem->page_size = HW_RXBD_RING_SIZE;
3448 rmem->pg_arr = (void **)txr->tx_desc_ring;
3449 rmem->dma_arr = txr->tx_desc_mapping;
3450 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3451 rmem->vmem = (void **)&txr->tx_buf_ring;
3455 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3459 struct rx_bd **rx_buf_ring;
3461 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3462 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3466 rxbd = rx_buf_ring[i];
3470 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3471 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3472 rxbd->rx_bd_opaque = prod;
3477 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3479 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3480 struct net_device *dev = bp->dev;
3484 prod = rxr->rx_prod;
3485 for (i = 0; i < bp->rx_ring_size; i++) {
3486 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3487 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3488 ring_nr, i, bp->rx_ring_size);
3491 prod = NEXT_RX(prod);
3493 rxr->rx_prod = prod;
3495 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3498 prod = rxr->rx_agg_prod;
3499 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3500 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3501 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3502 ring_nr, i, bp->rx_ring_size);
3505 prod = NEXT_RX_AGG(prod);
3507 rxr->rx_agg_prod = prod;
3513 for (i = 0; i < bp->max_tpa; i++) {
3514 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3518 rxr->rx_tpa[i].data = data;
3519 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3520 rxr->rx_tpa[i].mapping = mapping;
3526 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3528 struct bnxt_rx_ring_info *rxr;
3529 struct bnxt_ring_struct *ring;
3532 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3533 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3535 if (NET_IP_ALIGN == 2)
3536 type |= RX_BD_FLAGS_SOP;
3538 rxr = &bp->rx_ring[ring_nr];
3539 ring = &rxr->rx_ring_struct;
3540 bnxt_init_rxbd_pages(ring, type);
3542 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3543 bpf_prog_add(bp->xdp_prog, 1);
3544 rxr->xdp_prog = bp->xdp_prog;
3546 ring->fw_ring_id = INVALID_HW_RING_ID;
3548 ring = &rxr->rx_agg_ring_struct;
3549 ring->fw_ring_id = INVALID_HW_RING_ID;
3551 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3552 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3553 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3555 bnxt_init_rxbd_pages(ring, type);
3558 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3561 static void bnxt_init_cp_rings(struct bnxt *bp)
3565 for (i = 0; i < bp->cp_nr_rings; i++) {
3566 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3567 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3569 ring->fw_ring_id = INVALID_HW_RING_ID;
3570 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3571 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3572 for (j = 0; j < 2; j++) {
3573 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3578 ring = &cpr2->cp_ring_struct;
3579 ring->fw_ring_id = INVALID_HW_RING_ID;
3580 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3581 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3586 static int bnxt_init_rx_rings(struct bnxt *bp)
3590 if (BNXT_RX_PAGE_MODE(bp)) {
3591 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3592 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3594 bp->rx_offset = BNXT_RX_OFFSET;
3595 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3598 for (i = 0; i < bp->rx_nr_rings; i++) {
3599 rc = bnxt_init_one_rx_ring(bp, i);
3607 static int bnxt_init_tx_rings(struct bnxt *bp)
3611 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3614 for (i = 0; i < bp->tx_nr_rings; i++) {
3615 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3616 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3618 ring->fw_ring_id = INVALID_HW_RING_ID;
3624 static void bnxt_free_ring_grps(struct bnxt *bp)
3626 kfree(bp->grp_info);
3627 bp->grp_info = NULL;
3630 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3635 bp->grp_info = kcalloc(bp->cp_nr_rings,
3636 sizeof(struct bnxt_ring_grp_info),
3641 for (i = 0; i < bp->cp_nr_rings; i++) {
3643 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3644 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3645 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3646 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3647 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3652 static void bnxt_free_vnics(struct bnxt *bp)
3654 kfree(bp->vnic_info);
3655 bp->vnic_info = NULL;
3659 static int bnxt_alloc_vnics(struct bnxt *bp)
3663 #ifdef CONFIG_RFS_ACCEL
3664 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3665 num_vnics += bp->rx_nr_rings;
3668 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3671 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3676 bp->nr_vnics = num_vnics;
3680 static void bnxt_init_vnics(struct bnxt *bp)
3684 for (i = 0; i < bp->nr_vnics; i++) {
3685 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3688 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3689 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3690 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3692 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3694 if (bp->vnic_info[i].rss_hash_key) {
3696 prandom_bytes(vnic->rss_hash_key,
3699 memcpy(vnic->rss_hash_key,
3700 bp->vnic_info[0].rss_hash_key,
3706 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3710 pages = ring_size / desc_per_pg;
3717 while (pages & (pages - 1))
3723 void bnxt_set_tpa_flags(struct bnxt *bp)
3725 bp->flags &= ~BNXT_FLAG_TPA;
3726 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3728 if (bp->dev->features & NETIF_F_LRO)
3729 bp->flags |= BNXT_FLAG_LRO;
3730 else if (bp->dev->features & NETIF_F_GRO_HW)
3731 bp->flags |= BNXT_FLAG_GRO;
3734 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must be set on entry. */
3737 void bnxt_set_ring_params(struct bnxt *bp)
3739 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3740 u32 agg_factor = 0, agg_ring_size = 0;
3742 /* 8 for CRC and VLAN */
3743 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3745 rx_space = rx_size + NET_SKB_PAD +
3746 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3748 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3749 ring_size = bp->rx_ring_size;
3750 bp->rx_agg_ring_size = 0;
3751 bp->rx_agg_nr_pages = 0;
3753 if (bp->flags & BNXT_FLAG_TPA)
3754 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3756 bp->flags &= ~BNXT_FLAG_JUMBO;
3757 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3760 bp->flags |= BNXT_FLAG_JUMBO;
3761 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3762 if (jumbo_factor > agg_factor)
3763 agg_factor = jumbo_factor;
3766 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3767 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3768 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3769 bp->rx_ring_size, ring_size);
3770 bp->rx_ring_size = ring_size;
3772 agg_ring_size = ring_size * agg_factor;
3774 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3776 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3777 u32 tmp = agg_ring_size;
3779 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3780 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3781 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3782 tmp, agg_ring_size);
3784 bp->rx_agg_ring_size = agg_ring_size;
3785 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3786 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3787 rx_space = rx_size + NET_SKB_PAD +
3788 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3791 bp->rx_buf_use_size = rx_size;
3792 bp->rx_buf_size = rx_space;
3794 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3795 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3797 ring_size = bp->tx_ring_size;
3798 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3799 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3801 max_rx_cmpl = bp->rx_ring_size;
3802 /* MAX TPA needs to be added because TPA_START completions are
3803 * immediately recycled, so the TPA completions are not bound by the RX ring size. */
3806 if (bp->flags & BNXT_FLAG_TPA)
3807 max_rx_cmpl += bp->max_tpa;
3808 /* RX and TPA completions are 32-byte, all others are 16-byte */
3809 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3810 bp->cp_ring_size = ring_size;
3812 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3813 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3814 bp->cp_nr_pages = MAX_CP_PAGES;
3815 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3816 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3817 ring_size, bp->cp_ring_size);
3819 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3820 bp->cp_ring_mask = bp->cp_bit - 1;
3823 /* Changing allocation mode of RX rings.
3824 * TODO: Update when extending xdp_rxq_info to support allocation modes. */
3826 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3829 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3832 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3833 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3834 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3835 bp->rx_dir = DMA_BIDIRECTIONAL;
3836 bp->rx_skb_func = bnxt_rx_page_skb;
3837 /* Disable LRO or GRO_HW */
3838 netdev_update_features(bp->dev);
3840 bp->dev->max_mtu = bp->max_mtu;
3841 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3842 bp->rx_dir = DMA_FROM_DEVICE;
3843 bp->rx_skb_func = bnxt_rx_skb;
3848 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3851 struct bnxt_vnic_info *vnic;
3852 struct pci_dev *pdev = bp->pdev;
3857 for (i = 0; i < bp->nr_vnics; i++) {
3858 vnic = &bp->vnic_info[i];
3860 kfree(vnic->fw_grp_ids);
3861 vnic->fw_grp_ids = NULL;
3863 kfree(vnic->uc_list);
3864 vnic->uc_list = NULL;
3866 if (vnic->mc_list) {
3867 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3868 vnic->mc_list, vnic->mc_list_mapping);
3869 vnic->mc_list = NULL;
3872 if (vnic->rss_table) {
3873 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3875 vnic->rss_table_dma_addr);
3876 vnic->rss_table = NULL;
3879 vnic->rss_hash_key = NULL;
3884 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3886 int i, rc = 0, size;
3887 struct bnxt_vnic_info *vnic;
3888 struct pci_dev *pdev = bp->pdev;
3891 for (i = 0; i < bp->nr_vnics; i++) {
3892 vnic = &bp->vnic_info[i];
3894 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3895 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3898 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3899 if (!vnic->uc_list) {
3906 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3907 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3909 dma_alloc_coherent(&pdev->dev,
3911 &vnic->mc_list_mapping,
3913 if (!vnic->mc_list) {
3919 if (bp->flags & BNXT_FLAG_CHIP_P5)
3920 goto vnic_skip_grps;
3922 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3923 max_rings = bp->rx_nr_rings;
3927 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3928 if (!vnic->fw_grp_ids) {
3933 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3934 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3937 /* Allocate rss table and hash key */
3938 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3939 if (bp->flags & BNXT_FLAG_CHIP_P5)
3940 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3942 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3943 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3944 vnic->rss_table_size,
3945 &vnic->rss_table_dma_addr,
3947 if (!vnic->rss_table) {
3952 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3953 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3961 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3963 struct bnxt_hwrm_wait_token *token;
3965 dma_pool_destroy(bp->hwrm_dma_pool);
3966 bp->hwrm_dma_pool = NULL;
3969 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
3970 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
3974 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3976 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
3978 BNXT_HWRM_DMA_ALIGN, 0);
3979 if (!bp->hwrm_dma_pool)
3982 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
3987 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3989 kfree(stats->hw_masks);
3990 stats->hw_masks = NULL;
3991 kfree(stats->sw_stats);
3992 stats->sw_stats = NULL;
3993 if (stats->hw_stats) {
3994 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3995 stats->hw_stats_map);
3996 stats->hw_stats = NULL;
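/* Allocate one statistics block: the DMA-coherent buffer the hardware
 * writes into, a software shadow copy for accumulated counters, and, when
 * requested, the counter-width mask array.
 */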
4000 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4003 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4004 &stats->hw_stats_map, GFP_KERNEL);
4005 if (!stats->hw_stats)
4008 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4009 if (!stats->sw_stats)
4013 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4014 if (!stats->hw_masks)
4020 bnxt_free_stats_mem(bp, stats);
4024 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4028 for (i = 0; i < count; i++)
4032 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4036 for (i = 0; i < count; i++)
4037 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4040 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4041 struct bnxt_stats_mem *stats)
4043 struct hwrm_func_qstats_ext_output *resp;
4044 struct hwrm_func_qstats_ext_input *req;
4048 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4049 !(bp->flags & BNXT_FLAG_CHIP_P5))
4052 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4056 req->fid = cpu_to_le16(0xffff);
4057 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4059 resp = hwrm_req_hold(bp, req);
4060 rc = hwrm_req_send(bp, req);
4062 hw_masks = &resp->rx_ucast_pkts;
4063 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4065 hwrm_req_drop(bp, req);
4069 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4070 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
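/* Initialize the counter-wrap masks for ring and port statistics: use the
 * widths reported by firmware when available, otherwise fall back to
 * fixed default widths.
 */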
4072 static void bnxt_init_stats(struct bnxt *bp)
4074 struct bnxt_napi *bnapi = bp->bnapi[0];
4075 struct bnxt_cp_ring_info *cpr;
4076 struct bnxt_stats_mem *stats;
4077 __le64 *rx_stats, *tx_stats;
4078 int rc, rx_count, tx_count;
4079 u64 *rx_masks, *tx_masks;
4083 cpr = &bnapi->cp_ring;
4084 stats = &cpr->stats;
4085 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4087 if (bp->flags & BNXT_FLAG_CHIP_P5)
4088 mask = (1ULL << 48) - 1;
4091 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4093 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4094 stats = &bp->port_stats;
4095 rx_stats = stats->hw_stats;
4096 rx_masks = stats->hw_masks;
4097 rx_count = sizeof(struct rx_port_stats) / 8;
4098 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4099 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4100 tx_count = sizeof(struct tx_port_stats) / 8;
4102 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4103 rc = bnxt_hwrm_port_qstats(bp, flags);
4105 mask = (1ULL << 40) - 1;
4107 bnxt_fill_masks(rx_masks, mask, rx_count);
4108 bnxt_fill_masks(tx_masks, mask, tx_count);
4110 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4111 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4112 bnxt_hwrm_port_qstats(bp, 0);
4115 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4116 stats = &bp->rx_port_stats_ext;
4117 rx_stats = stats->hw_stats;
4118 rx_masks = stats->hw_masks;
4119 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4120 stats = &bp->tx_port_stats_ext;
4121 tx_stats = stats->hw_stats;
4122 tx_masks = stats->hw_masks;
4123 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4125 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4126 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4128 mask = (1ULL << 40) - 1;
4130 bnxt_fill_masks(rx_masks, mask, rx_count);
4132 bnxt_fill_masks(tx_masks, mask, tx_count);
4134 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4136 bnxt_copy_hw_masks(tx_masks, tx_stats,
4138 bnxt_hwrm_port_qstats_ext(bp, 0);
4143 static void bnxt_free_port_stats(struct bnxt *bp)
4145 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4146 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4148 bnxt_free_stats_mem(bp, &bp->port_stats);
4149 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4150 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4153 static void bnxt_free_ring_stats(struct bnxt *bp)
4160 for (i = 0; i < bp->cp_nr_rings; i++) {
4161 struct bnxt_napi *bnapi = bp->bnapi[i];
4162 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4164 bnxt_free_stats_mem(bp, &cpr->stats);
4168 static int bnxt_alloc_stats(struct bnxt *bp)
4173 size = bp->hw_ring_stats_size;
4175 for (i = 0; i < bp->cp_nr_rings; i++) {
4176 struct bnxt_napi *bnapi = bp->bnapi[i];
4177 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4179 cpr->stats.len = size;
4180 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4184 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4187 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4190 if (bp->port_stats.hw_stats)
4191 goto alloc_ext_stats;
4193 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4194 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4198 bp->flags |= BNXT_FLAG_PORT_STATS;
4201 /* Display extended statistics only if FW supports it */
4202 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4203 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4206 if (bp->rx_port_stats_ext.hw_stats)
4207 goto alloc_tx_ext_stats;
4209 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4210 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4211 /* Extended stats are optional */
4216 if (bp->tx_port_stats_ext.hw_stats)
4219 if (bp->hwrm_spec_code >= 0x10902 ||
4220 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4221 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4222 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4223 /* Extended stats are optional */
4227 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4231 static void bnxt_clear_ring_indices(struct bnxt *bp)
4238 for (i = 0; i < bp->cp_nr_rings; i++) {
4239 struct bnxt_napi *bnapi = bp->bnapi[i];
4240 struct bnxt_cp_ring_info *cpr;
4241 struct bnxt_rx_ring_info *rxr;
4242 struct bnxt_tx_ring_info *txr;
4247 cpr = &bnapi->cp_ring;
4248 cpr->cp_raw_cons = 0;
4250 txr = bnapi->tx_ring;
4256 rxr = bnapi->rx_ring;
4259 rxr->rx_agg_prod = 0;
4260 rxr->rx_sw_agg_prod = 0;
4261 rxr->rx_next_cons = 0;
4266 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4268 #ifdef CONFIG_RFS_ACCEL
4271 /* Under rtnl_lock and all our NAPIs have been disabled. It's
4272 * safe to delete the hash table. */
4274 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4275 struct hlist_head *head;
4276 struct hlist_node *tmp;
4277 struct bnxt_ntuple_filter *fltr;
4279 head = &bp->ntp_fltr_hash_tbl[i];
4280 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4281 hlist_del(&fltr->hash);
4286 kfree(bp->ntp_fltr_bmap);
4287 bp->ntp_fltr_bmap = NULL;
4289 bp->ntp_fltr_count = 0;
4293 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4295 #ifdef CONFIG_RFS_ACCEL
4298 if (!(bp->flags & BNXT_FLAG_RFS))
4301 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4302 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4304 bp->ntp_fltr_count = 0;
4305 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4309 if (!bp->ntp_fltr_bmap)
4318 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4320 bnxt_free_vnic_attributes(bp);
4321 bnxt_free_tx_rings(bp);
4322 bnxt_free_rx_rings(bp);
4323 bnxt_free_cp_rings(bp);
4324 bnxt_free_all_cp_arrays(bp);
4325 bnxt_free_ntp_fltrs(bp, irq_re_init);
4327 bnxt_free_ring_stats(bp);
4328 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4329 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4330 bnxt_free_port_stats(bp);
4331 bnxt_free_ring_grps(bp);
4332 bnxt_free_vnics(bp);
4333 kfree(bp->tx_ring_map);
4334 bp->tx_ring_map = NULL;
4342 bnxt_clear_ring_indices(bp);
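/* Allocate all per-device ring state: the bnxt_napi array, RX/TX ring
 * info, statistics, ntuple filter tables, VNICs and the rings themselves,
 * unwinding via bnxt_free_mem() on failure.
 */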
4346 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4348 int i, j, rc, size, arr_size;
4352 /* Allocate bnapi mem pointer array and mem block for all queues */
4355 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4357 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4358 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4364 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4365 bp->bnapi[i] = bnapi;
4366 bp->bnapi[i]->index = i;
4367 bp->bnapi[i]->bp = bp;
4368 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4369 struct bnxt_cp_ring_info *cpr =
4370 &bp->bnapi[i]->cp_ring;
4372 cpr->cp_ring_struct.ring_mem.flags =
4373 BNXT_RMEM_RING_PTE_FLAG;
4377 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4378 sizeof(struct bnxt_rx_ring_info),
4383 for (i = 0; i < bp->rx_nr_rings; i++) {
4384 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4386 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4387 rxr->rx_ring_struct.ring_mem.flags =
4388 BNXT_RMEM_RING_PTE_FLAG;
4389 rxr->rx_agg_ring_struct.ring_mem.flags =
4390 BNXT_RMEM_RING_PTE_FLAG;
4392 rxr->bnapi = bp->bnapi[i];
4393 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4396 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4397 sizeof(struct bnxt_tx_ring_info),
4402 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4405 if (!bp->tx_ring_map)
4408 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4411 j = bp->rx_nr_rings;
4413 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4414 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4416 if (bp->flags & BNXT_FLAG_CHIP_P5)
4417 txr->tx_ring_struct.ring_mem.flags =
4418 BNXT_RMEM_RING_PTE_FLAG;
4419 txr->bnapi = bp->bnapi[j];
4420 bp->bnapi[j]->tx_ring = txr;
4421 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4422 if (i >= bp->tx_nr_rings_xdp) {
4423 txr->txq_index = i - bp->tx_nr_rings_xdp;
4424 bp->bnapi[j]->tx_int = bnxt_tx_int;
4426 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4427 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4431 rc = bnxt_alloc_stats(bp);
4434 bnxt_init_stats(bp);
4436 rc = bnxt_alloc_ntp_fltrs(bp);
4440 rc = bnxt_alloc_vnics(bp);
4445 rc = bnxt_alloc_all_cp_arrays(bp);
4449 bnxt_init_ring_struct(bp);
4451 rc = bnxt_alloc_rx_rings(bp);
4455 rc = bnxt_alloc_tx_rings(bp);
4459 rc = bnxt_alloc_cp_rings(bp);
4463 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4464 BNXT_VNIC_UCAST_FLAG;
4465 rc = bnxt_alloc_vnic_attributes(bp);
4471 bnxt_free_mem(bp, true);
4475 static void bnxt_disable_int(struct bnxt *bp)
4482 for (i = 0; i < bp->cp_nr_rings; i++) {
4483 struct bnxt_napi *bnapi = bp->bnapi[i];
4484 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4485 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4487 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4488 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4492 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4494 struct bnxt_napi *bnapi = bp->bnapi[n];
4495 struct bnxt_cp_ring_info *cpr;
4497 cpr = &bnapi->cp_ring;
4498 return cpr->cp_ring_struct.map_idx;
4501 static void bnxt_disable_int_sync(struct bnxt *bp)
4508 atomic_inc(&bp->intr_sem);
4510 bnxt_disable_int(bp);
4511 for (i = 0; i < bp->cp_nr_rings; i++) {
4512 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4514 synchronize_irq(bp->irq_tbl[map_idx].vector);
4518 static void bnxt_enable_int(struct bnxt *bp)
4522 atomic_set(&bp->intr_sem, 0);
4523 for (i = 0; i < bp->cp_nr_rings; i++) {
4524 struct bnxt_napi *bnapi = bp->bnapi[i];
4525 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4527 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
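/* Register the driver with the firmware: advertise the OS type, driver
 * version, the async events we want forwarded, and (for a PF) which HWRM
 * commands from VFs should be forwarded to the driver.
 */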
4531 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4534 DECLARE_BITMAP(async_events_bmap, 256);
4535 u32 *events = (u32 *)async_events_bmap;
4536 struct hwrm_func_drv_rgtr_output *resp;
4537 struct hwrm_func_drv_rgtr_input *req;
4541 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4545 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4546 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4547 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4549 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4550 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4551 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4552 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4553 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4554 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4555 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4556 req->flags = cpu_to_le32(flags);
4557 req->ver_maj_8b = DRV_VER_MAJ;
4558 req->ver_min_8b = DRV_VER_MIN;
4559 req->ver_upd_8b = DRV_VER_UPD;
4560 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4561 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4562 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4568 memset(data, 0, sizeof(data));
4569 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4570 u16 cmd = bnxt_vf_req_snif[i];
4571 unsigned int bit, idx;
4575 data[idx] |= 1 << bit;
4578 for (i = 0; i < 8; i++)
4579 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4582 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4585 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4586 req->flags |= cpu_to_le32(
4587 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4589 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4590 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4591 u16 event_id = bnxt_async_events_arr[i];
4593 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4594 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4596 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4598 if (bmap && bmap_size) {
4599 for (i = 0; i < bmap_size; i++) {
4600 if (test_bit(i, bmap))
4601 __set_bit(i, async_events_bmap);
4604 for (i = 0; i < 8; i++)
4605 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4609 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4611 resp = hwrm_req_hold(bp, req);
4612 rc = hwrm_req_send(bp, req);
4614 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4616 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4617 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4619 hwrm_req_drop(bp, req);
4623 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4625 struct hwrm_func_drv_unrgtr_input *req;
4628 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4631 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4634 return hwrm_req_send(bp, req);
4637 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4639 struct hwrm_tunnel_dst_port_free_input *req;
4642 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4646 req->tunnel_type = tunnel_type;
4648 switch (tunnel_type) {
4649 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4650 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4651 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4653 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4654 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4655 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4661 rc = hwrm_req_send(bp, req);
4663 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4668 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4671 struct hwrm_tunnel_dst_port_alloc_output *resp;
4672 struct hwrm_tunnel_dst_port_alloc_input *req;
4675 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4679 req->tunnel_type = tunnel_type;
4680 req->tunnel_dst_port_val = port;
4682 resp = hwrm_req_hold(bp, req);
4683 rc = hwrm_req_send(bp, req);
4685 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4690 switch (tunnel_type) {
4691 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4692 bp->vxlan_fw_dst_port_id =
4693 le16_to_cpu(resp->tunnel_dst_port_id);
4695 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4696 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4703 hwrm_req_drop(bp, req);
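/* Descriptive note (added): program the receive mask (unicast/multicast/
 * promiscuous bits) and the multicast address table for one VNIC.
 */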
4707 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4709 struct hwrm_cfa_l2_set_rx_mask_input *req;
4710 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4713 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4717 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4718 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4719 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4720 req->mask = cpu_to_le32(vnic->rx_mask);
4721 return hwrm_req_send_silent(bp, req);
4724 #ifdef CONFIG_RFS_ACCEL
4725 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4726 struct bnxt_ntuple_filter *fltr)
4728 struct hwrm_cfa_ntuple_filter_free_input *req;
4731 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4735 req->ntuple_filter_id = fltr->filter_id;
4736 return hwrm_req_send(bp, req);
4739 #define BNXT_NTP_FLTR_FLAGS \
4740 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4741 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4742 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4743 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4744 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4745 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4746 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4747 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4748 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4749 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4750 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4751 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4752 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4753 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4755 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4756 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
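/* Descriptive note (added): allocate an n-tuple (aRFS) filter in the
 * firmware.  The stack's flow keys are translated into an exact-match
 * 4-tuple filter steered either to an RX ring index or to the per-queue
 * VNIC, depending on firmware capability.
 */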
4758 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4759 struct bnxt_ntuple_filter *fltr)
4761 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4762 struct hwrm_cfa_ntuple_filter_alloc_input *req;
4763 struct flow_keys *keys = &fltr->fkeys;
4764 struct bnxt_vnic_info *vnic;
4768 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4772 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4774 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4775 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4776 req->dst_id = cpu_to_le16(fltr->rxq);
4778 vnic = &bp->vnic_info[fltr->rxq + 1];
4779 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4781 req->flags = cpu_to_le32(flags);
4782 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4784 req->ethertype = htons(ETH_P_IP);
4785 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4786 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4787 req->ip_protocol = keys->basic.ip_proto;
4789 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4792 req->ethertype = htons(ETH_P_IPV6);
4794 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4795 *(struct in6_addr *)&req->src_ipaddr[0] =
4796 keys->addrs.v6addrs.src;
4797 *(struct in6_addr *)&req->dst_ipaddr[0] =
4798 keys->addrs.v6addrs.dst;
4799 for (i = 0; i < 4; i++) {
4800 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4801 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4804 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4805 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4806 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4807 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4809 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4810 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4812 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4815 req->src_port = keys->ports.src;
4816 req->src_port_mask = cpu_to_be16(0xffff);
4817 req->dst_port = keys->ports.dst;
4818 req->dst_port_mask = cpu_to_be16(0xffff);
4820 resp = hwrm_req_hold(bp, req);
4821 rc = hwrm_req_send(bp, req);
4823 fltr->filter_id = resp->ntuple_filter_id;
4824 hwrm_req_drop(bp, req);
4829 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4832 struct hwrm_cfa_l2_filter_alloc_output *resp;
4833 struct hwrm_cfa_l2_filter_alloc_input *req;
4836 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4840 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4841 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4843 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4844 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4846 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4847 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4848 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4849 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4850 req->l2_addr_mask[0] = 0xff;
4851 req->l2_addr_mask[1] = 0xff;
4852 req->l2_addr_mask[2] = 0xff;
4853 req->l2_addr_mask[3] = 0xff;
4854 req->l2_addr_mask[4] = 0xff;
4855 req->l2_addr_mask[5] = 0xff;
4857 resp = hwrm_req_hold(bp, req);
4858 rc = hwrm_req_send(bp, req);
4860 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4862 hwrm_req_drop(bp, req);
4866 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4868 struct hwrm_cfa_l2_filter_free_input *req;
4869 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4872 /* Any associated ntuple filters will also be cleared by firmware. */
4873 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4876 hwrm_req_hold(bp, req);
4877 for (i = 0; i < num_of_vnics; i++) {
4878 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4880 for (j = 0; j < vnic->uc_filter_count; j++) {
4881 req->l2_filter_id = vnic->fw_l2_filter_id[j];
4883 rc = hwrm_req_send(bp, req);
4885 vnic->uc_filter_count = 0;
4887 hwrm_req_drop(bp, req);
4891 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4893 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4894 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4895 struct hwrm_vnic_tpa_cfg_input *req;
4898 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4901 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4906 u16 mss = bp->dev->mtu - 40;
4907 u32 nsegs, n, segs = 0, flags;
4909 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4910 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4911 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4912 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4913 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4914 if (tpa_flags & BNXT_FLAG_GRO)
4915 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4917 req->flags = cpu_to_le32(flags);
4920 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4921 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4922 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4924 /* Number of segs is in log2 units, and the first packet is not
4925 * included as part of these units.
4926 */
4927 if (mss <= BNXT_RX_PAGE_SIZE) {
4928 n = BNXT_RX_PAGE_SIZE / mss;
4929 nsegs = (MAX_SKB_FRAGS - 1) * n;
4931 n = mss / BNXT_RX_PAGE_SIZE;
4932 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4934 nsegs = (MAX_SKB_FRAGS - n) / n;
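		/* Illustration with assumed values (4K aggregation page and
		 * MAX_SKB_FRAGS of 17): for a 1500-byte MTU (mss = 1460),
		 * n = 4096 / 1460 = 2 and nsegs = (17 - 1) * 2 = 32; for a
		 * 9000-byte MTU (mss = 8960), n = 3 and nsegs = (17 - 3) / 3 = 4.
		 */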
4937 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4938 segs = MAX_TPA_SEGS_P5;
4939 max_aggs = bp->max_tpa;
4941 segs = ilog2(nsegs);
4943 req->max_agg_segs = cpu_to_le16(segs);
4944 req->max_aggs = cpu_to_le16(max_aggs);
4946 req->min_agg_len = cpu_to_le32(512);
4948 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4950 return hwrm_req_send(bp, req);
4953 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4955 struct bnxt_ring_grp_info *grp_info;
4957 grp_info = &bp->grp_info[ring->grp_idx];
4958 return grp_info->cp_fw_ring_id;
4961 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4963 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4964 struct bnxt_napi *bnapi = rxr->bnapi;
4965 struct bnxt_cp_ring_info *cpr;
4967 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4968 return cpr->cp_ring_struct.fw_ring_id;
4970 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4974 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4976 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4977 struct bnxt_napi *bnapi = txr->bnapi;
4978 struct bnxt_cp_ring_info *cpr;
4980 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4981 return cpr->cp_ring_struct.fw_ring_id;
4983 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
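/* Descriptive note (added): allocate the driver's copy of the RSS
 * indirection table.  P5 chips use the maximum P5 RSS table size; older
 * chips use the fixed hardware hash index table size.
 */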
4987 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
4991 if (bp->flags & BNXT_FLAG_CHIP_P5)
4992 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
4994 entries = HW_HASH_INDEX_SIZE;
4996 bp->rss_indir_tbl_entries = entries;
4997 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
4999 if (!bp->rss_indir_tbl)
5004 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5006 u16 max_rings, max_entries, pad, i;
5008 if (!bp->rx_nr_rings)
5011 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5012 max_rings = bp->rx_nr_rings - 1;
5014 max_rings = bp->rx_nr_rings;
5016 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5018 for (i = 0; i < max_entries; i++)
5019 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5021 pad = bp->rss_indir_tbl_entries - max_entries;
5023 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5026 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5028 u16 i, tbl_size, max_ring = 0;
5030 if (!bp->rss_indir_tbl)
5033 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5034 for (i = 0; i < tbl_size; i++)
5035 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5039 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5041 if (bp->flags & BNXT_FLAG_CHIP_P5)
5042 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5043 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5048 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5050 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5053 /* Fill the RSS indirection table with ring group ids */
5054 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5056 j = bp->rss_indir_tbl[i];
5057 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
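/* Descriptive note (added): P5 variant below writes each indirection table
 * slot as a pair of entries, the RX ring's firmware id followed by its
 * completion ring id.
 */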
5061 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5062 struct bnxt_vnic_info *vnic)
5064 __le16 *ring_tbl = vnic->rss_table;
5065 struct bnxt_rx_ring_info *rxr;
5068 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5070 for (i = 0; i < tbl_size; i++) {
5073 j = bp->rss_indir_tbl[i];
5074 rxr = &bp->rx_ring[j];
5076 ring_id = rxr->rx_ring_struct.fw_ring_id;
5077 *ring_tbl++ = cpu_to_le16(ring_id);
5078 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5079 *ring_tbl++ = cpu_to_le16(ring_id);
5083 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5085 if (bp->flags & BNXT_FLAG_CHIP_P5)
5086 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5088 __bnxt_fill_hw_rss_tbl(bp, vnic);
5091 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5093 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5094 struct hwrm_vnic_rss_cfg_input *req;
5097 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5098 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5101 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5106 bnxt_fill_hw_rss_tbl(bp, vnic);
5107 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5108 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5109 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5110 req->hash_key_tbl_addr =
5111 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5113 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5114 return hwrm_req_send(bp, req);
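/* Descriptive note (added): P5 variant of RSS configuration.  The
 * indirection table spans multiple RSS contexts, so the same request is
 * sent once per context while the ring table DMA address advances by
 * BNXT_RSS_TABLE_SIZE_P5 each time.
 */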
5117 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5119 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5120 struct hwrm_vnic_rss_cfg_input *req;
5121 dma_addr_t ring_tbl_map;
5125 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5129 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5131 return hwrm_req_send(bp, req);
5133 bnxt_fill_hw_rss_tbl(bp, vnic);
5134 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5135 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5136 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5137 ring_tbl_map = vnic->rss_table_dma_addr;
5138 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5140 hwrm_req_hold(bp, req);
5141 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5142 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5143 req->ring_table_pair_index = i;
5144 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5145 rc = hwrm_req_send(bp, req);
5151 hwrm_req_drop(bp, req);
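/* Descriptive note (added): configure packet placement for the VNIC, i.e.
 * jumbo placement plus header/data split for IPv4 and IPv6, with both
 * thresholds taken from the RX copy threshold.
 */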
5155 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5157 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5158 struct hwrm_vnic_plcmodes_cfg_input *req;
5161 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5165 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5166 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5167 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5169 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5170 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5171 /* thresholds not implemented in firmware yet */
5172 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5173 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5174 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5175 return hwrm_req_send(bp, req);
5178 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5181 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5183 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5186 req->rss_cos_lb_ctx_id =
5187 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5189 hwrm_req_send(bp, req);
5190 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5193 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5197 for (i = 0; i < bp->nr_vnics; i++) {
5198 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5200 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5201 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5202 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5205 bp->rsscos_nr_ctxs = 0;
5208 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5210 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5211 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5214 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5218 resp = hwrm_req_hold(bp, req);
5219 rc = hwrm_req_send(bp, req);
5221 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5222 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5223 hwrm_req_drop(bp, req);
5228 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5230 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5231 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5232 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
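/* Descriptive note (added): configure a VNIC, covering the default ring
 * group (or default RX/completion ring ids on P5 chips), RSS and COS
 * rules, MRU and VLAN stripping.
 */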
5235 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5237 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5238 struct hwrm_vnic_cfg_input *req;
5239 unsigned int ring = 0, grp_idx;
5243 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5247 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5248 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5250 req->default_rx_ring_id =
5251 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5252 req->default_cmpl_ring_id =
5253 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5255 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5256 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5259 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5260 /* Only RSS supported for now; TBD: COS & LB */
5261 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5262 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5263 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5264 VNIC_CFG_REQ_ENABLES_MRU);
5265 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5267 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5268 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5269 VNIC_CFG_REQ_ENABLES_MRU);
5270 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5272 req->rss_rule = cpu_to_le16(0xffff);
5275 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5276 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5277 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5278 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5280 req->cos_rule = cpu_to_le16(0xffff);
5283 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5285 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5287 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5288 ring = bp->rx_nr_rings - 1;
5290 grp_idx = bp->rx_ring[ring].bnapi->index;
5291 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5292 req->lb_rule = cpu_to_le16(0xffff);
5294 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5296 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5297 #ifdef CONFIG_BNXT_SRIOV
5299 def_vlan = bp->vf.vlan;
5301 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5302 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5303 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5304 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5306 return hwrm_req_send(bp, req);
5309 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5311 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5312 struct hwrm_vnic_free_input *req;
5314 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5318 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5320 hwrm_req_send(bp, req);
5321 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5325 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5329 for (i = 0; i < bp->nr_vnics; i++)
5330 bnxt_hwrm_vnic_free_one(bp, i);
5333 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5334 unsigned int start_rx_ring_idx,
5335 unsigned int nr_rings)
5337 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5338 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5339 struct hwrm_vnic_alloc_output *resp;
5340 struct hwrm_vnic_alloc_input *req;
5343 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5347 if (bp->flags & BNXT_FLAG_CHIP_P5)
5348 goto vnic_no_ring_grps;
5350 /* map ring groups to this vnic */
5351 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5352 grp_idx = bp->rx_ring[i].bnapi->index;
5353 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5354 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5358 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5362 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5363 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5365 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5367 resp = hwrm_req_hold(bp, req);
5368 rc = hwrm_req_send(bp, req);
5370 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5371 hwrm_req_drop(bp, req);
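/* Descriptive note (added): query VNIC capabilities and cache them: the
 * new RSS capability on older chips, RoCE mirroring, VLAN strip support
 * and the TPA v2 aggregation limit, which also selects the larger
 * per-ring stats structure.
 */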
5375 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5377 struct hwrm_vnic_qcaps_output *resp;
5378 struct hwrm_vnic_qcaps_input *req;
5381 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5382 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5383 if (bp->hwrm_spec_code < 0x10600)
5386 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5390 resp = hwrm_req_hold(bp, req);
5391 rc = hwrm_req_send(bp, req);
5393 u32 flags = le32_to_cpu(resp->flags);
5395 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5396 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5397 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5399 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5400 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5402 /* Older P5 fw before EXT_HW_STATS support did not set
5403 * VLAN_STRIP_CAP properly.
5404 */
5405 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5406 (BNXT_CHIP_P5_THOR(bp) &&
5407 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5408 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5409 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5410 if (bp->max_tpa_v2) {
5411 if (BNXT_CHIP_P5_THOR(bp))
5412 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5414 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5417 hwrm_req_drop(bp, req);
5421 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5423 struct hwrm_ring_grp_alloc_output *resp;
5424 struct hwrm_ring_grp_alloc_input *req;
5428 if (bp->flags & BNXT_FLAG_CHIP_P5)
5431 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5435 resp = hwrm_req_hold(bp, req);
5436 for (i = 0; i < bp->rx_nr_rings; i++) {
5437 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5439 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5440 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5441 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5442 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5444 rc = hwrm_req_send(bp, req);
5449 bp->grp_info[grp_idx].fw_grp_id =
5450 le32_to_cpu(resp->ring_group_id);
5452 hwrm_req_drop(bp, req);
5456 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5458 struct hwrm_ring_grp_free_input *req;
5461 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5464 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5467 hwrm_req_hold(bp, req);
5468 for (i = 0; i < bp->cp_nr_rings; i++) {
5469 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5471 req->ring_group_id =
5472 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5474 hwrm_req_send(bp, req);
5475 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5477 hwrm_req_drop(bp, req);
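/* Descriptive note (added): send one HWRM_RING_ALLOC request for a TX,
 * RX, aggregation, completion or NQ ring and record the firmware ring id
 * on success.
 */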
5480 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5481 struct bnxt_ring_struct *ring,
5482 u32 ring_type, u32 map_index)
5484 struct hwrm_ring_alloc_output *resp;
5485 struct hwrm_ring_alloc_input *req;
5486 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5487 struct bnxt_ring_grp_info *grp_info;
5491 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5496 if (rmem->nr_pages > 1) {
5497 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5498 /* Page size is in log2 units */
5499 req->page_size = BNXT_PAGE_SHIFT;
5500 req->page_tbl_depth = 1;
5502 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5505 /* Association of ring index with doorbell index and MSIX number */
5506 req->logical_id = cpu_to_le16(map_index);
5508 switch (ring_type) {
5509 case HWRM_RING_ALLOC_TX: {
5510 struct bnxt_tx_ring_info *txr;
5512 txr = container_of(ring, struct bnxt_tx_ring_info,
5514 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5515 /* Association of transmit ring with completion ring */
5516 grp_info = &bp->grp_info[ring->grp_idx];
5517 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5518 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5519 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5520 req->queue_id = cpu_to_le16(ring->queue_id);
5523 case HWRM_RING_ALLOC_RX:
5524 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5525 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5526 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5529 /* Association of rx ring with stats context */
5530 grp_info = &bp->grp_info[ring->grp_idx];
5531 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5532 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5533 req->enables |= cpu_to_le32(
5534 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5535 if (NET_IP_ALIGN == 2)
5536 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5537 req->flags = cpu_to_le16(flags);
5540 case HWRM_RING_ALLOC_AGG:
5541 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5542 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5543 /* Association of agg ring with rx ring */
5544 grp_info = &bp->grp_info[ring->grp_idx];
5545 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5546 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5547 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5548 req->enables |= cpu_to_le32(
5549 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5550 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5552 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5554 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5556 case HWRM_RING_ALLOC_CMPL:
5557 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5558 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5559 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5560 /* Association of cp ring with nq */
5561 grp_info = &bp->grp_info[map_index];
5562 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5563 req->cq_handle = cpu_to_le64(ring->handle);
5564 req->enables |= cpu_to_le32(
5565 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5566 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5567 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5570 case HWRM_RING_ALLOC_NQ:
5571 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5572 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5573 if (bp->flags & BNXT_FLAG_USING_MSIX)
5574 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5577 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5582 resp = hwrm_req_hold(bp, req);
5583 rc = hwrm_req_send(bp, req);
5584 err = le16_to_cpu(resp->error_code);
5585 ring_id = le16_to_cpu(resp->ring_id);
5586 hwrm_req_drop(bp, req);
5590 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5591 ring_type, rc, err);
5594 ring->fw_ring_id = ring_id;
5598 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5603 struct hwrm_func_cfg_input *req;
5605 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5609 req->fid = cpu_to_le16(0xffff);
5610 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5611 req->async_event_cr = cpu_to_le16(idx);
5612 return hwrm_req_send(bp, req);
5614 struct hwrm_func_vf_cfg_input *req;
5616 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5621 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5622 req->async_event_cr = cpu_to_le16(idx);
5623 return hwrm_req_send(bp, req);
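/* Descriptive note (added): compute the doorbell address and key for a
 * newly allocated ring.  P5 chips use 64-bit doorbell keys at a fixed
 * PF/VF BAR offset; older chips use 32-bit keys at map_idx * 0x80.
 */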
5627 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5628 u32 map_idx, u32 xid)
5630 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5632 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5634 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5635 switch (ring_type) {
5636 case HWRM_RING_ALLOC_TX:
5637 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5639 case HWRM_RING_ALLOC_RX:
5640 case HWRM_RING_ALLOC_AGG:
5641 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5643 case HWRM_RING_ALLOC_CMPL:
5644 db->db_key64 = DBR_PATH_L2;
5646 case HWRM_RING_ALLOC_NQ:
5647 db->db_key64 = DBR_PATH_L2;
5650 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5652 db->doorbell = bp->bar1 + map_idx * 0x80;
5653 switch (ring_type) {
5654 case HWRM_RING_ALLOC_TX:
5655 db->db_key32 = DB_KEY_TX;
5657 case HWRM_RING_ALLOC_RX:
5658 case HWRM_RING_ALLOC_AGG:
5659 db->db_key32 = DB_KEY_RX;
5661 case HWRM_RING_ALLOC_CMPL:
5662 db->db_key32 = DB_KEY_CP;
5668 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5670 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5674 if (bp->flags & BNXT_FLAG_CHIP_P5)
5675 type = HWRM_RING_ALLOC_NQ;
5677 type = HWRM_RING_ALLOC_CMPL;
5678 for (i = 0; i < bp->cp_nr_rings; i++) {
5679 struct bnxt_napi *bnapi = bp->bnapi[i];
5680 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5681 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5682 u32 map_idx = ring->map_idx;
5683 unsigned int vector;
5685 vector = bp->irq_tbl[map_idx].vector;
5686 disable_irq_nosync(vector);
5687 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5692 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5693 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5695 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5698 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5700 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5704 type = HWRM_RING_ALLOC_TX;
5705 for (i = 0; i < bp->tx_nr_rings; i++) {
5706 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5707 struct bnxt_ring_struct *ring;
5710 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5711 struct bnxt_napi *bnapi = txr->bnapi;
5712 struct bnxt_cp_ring_info *cpr, *cpr2;
5713 u32 type2 = HWRM_RING_ALLOC_CMPL;
5715 cpr = &bnapi->cp_ring;
5716 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5717 ring = &cpr2->cp_ring_struct;
5718 ring->handle = BNXT_TX_HDL;
5719 map_idx = bnapi->index;
5720 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5723 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5725 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5727 ring = &txr->tx_ring_struct;
5729 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5732 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5735 type = HWRM_RING_ALLOC_RX;
5736 for (i = 0; i < bp->rx_nr_rings; i++) {
5737 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5738 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5739 struct bnxt_napi *bnapi = rxr->bnapi;
5740 u32 map_idx = bnapi->index;
5742 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5745 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5746 /* If we have agg rings, post agg buffers first. */
5748 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5749 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5750 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5751 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5752 u32 type2 = HWRM_RING_ALLOC_CMPL;
5753 struct bnxt_cp_ring_info *cpr2;
5755 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5756 ring = &cpr2->cp_ring_struct;
5757 ring->handle = BNXT_RX_HDL;
5758 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5761 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5763 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5768 type = HWRM_RING_ALLOC_AGG;
5769 for (i = 0; i < bp->rx_nr_rings; i++) {
5770 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5771 struct bnxt_ring_struct *ring =
5772 &rxr->rx_agg_ring_struct;
5773 u32 grp_idx = ring->grp_idx;
5774 u32 map_idx = grp_idx + bp->rx_nr_rings;
5776 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5780 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5782 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5783 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5784 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
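/* Descriptive note (added): free one firmware ring.  On the normal close
 * path the associated completion ring id is passed along; otherwise
 * INVALID_HW_RING_ID is used.
 */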
5791 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5792 struct bnxt_ring_struct *ring,
5793 u32 ring_type, int cmpl_ring_id)
5795 struct hwrm_ring_free_output *resp;
5796 struct hwrm_ring_free_input *req;
5800 if (BNXT_NO_FW_ACCESS(bp))
5803 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5807 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5808 req->ring_type = ring_type;
5809 req->ring_id = cpu_to_le16(ring->fw_ring_id);
5811 resp = hwrm_req_hold(bp, req);
5812 rc = hwrm_req_send(bp, req);
5813 error_code = le16_to_cpu(resp->error_code);
5814 hwrm_req_drop(bp, req);
5816 if (rc || error_code) {
5817 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5818 ring_type, rc, error_code);
5824 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5832 for (i = 0; i < bp->tx_nr_rings; i++) {
5833 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5834 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5836 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5837 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5839 hwrm_ring_free_send_msg(bp, ring,
5840 RING_FREE_REQ_RING_TYPE_TX,
5841 close_path ? cmpl_ring_id :
5842 INVALID_HW_RING_ID);
5843 ring->fw_ring_id = INVALID_HW_RING_ID;
5847 for (i = 0; i < bp->rx_nr_rings; i++) {
5848 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5849 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5850 u32 grp_idx = rxr->bnapi->index;
5852 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5853 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5855 hwrm_ring_free_send_msg(bp, ring,
5856 RING_FREE_REQ_RING_TYPE_RX,
5857 close_path ? cmpl_ring_id :
5858 INVALID_HW_RING_ID);
5859 ring->fw_ring_id = INVALID_HW_RING_ID;
5860 bp->grp_info[grp_idx].rx_fw_ring_id =
5865 if (bp->flags & BNXT_FLAG_CHIP_P5)
5866 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5868 type = RING_FREE_REQ_RING_TYPE_RX;
5869 for (i = 0; i < bp->rx_nr_rings; i++) {
5870 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5871 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5872 u32 grp_idx = rxr->bnapi->index;
5874 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5875 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5877 hwrm_ring_free_send_msg(bp, ring, type,
5878 close_path ? cmpl_ring_id :
5879 INVALID_HW_RING_ID);
5880 ring->fw_ring_id = INVALID_HW_RING_ID;
5881 bp->grp_info[grp_idx].agg_fw_ring_id =
5886 /* The completion rings are about to be freed. After that the
5887 * IRQ doorbell will not work anymore. So we need to disable
5888 * the IRQ here.
5889 */
5890 bnxt_disable_int_sync(bp);
5892 if (bp->flags & BNXT_FLAG_CHIP_P5)
5893 type = RING_FREE_REQ_RING_TYPE_NQ;
5895 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5896 for (i = 0; i < bp->cp_nr_rings; i++) {
5897 struct bnxt_napi *bnapi = bp->bnapi[i];
5898 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5899 struct bnxt_ring_struct *ring;
5902 for (j = 0; j < 2; j++) {
5903 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5906 ring = &cpr2->cp_ring_struct;
5907 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5909 hwrm_ring_free_send_msg(bp, ring,
5910 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5911 INVALID_HW_RING_ID);
5912 ring->fw_ring_id = INVALID_HW_RING_ID;
5915 ring = &cpr->cp_ring_struct;
5916 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5917 hwrm_ring_free_send_msg(bp, ring, type,
5918 INVALID_HW_RING_ID);
5919 ring->fw_ring_id = INVALID_HW_RING_ID;
5920 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5925 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5928 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5930 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5931 struct hwrm_func_qcfg_output *resp;
5932 struct hwrm_func_qcfg_input *req;
5935 if (bp->hwrm_spec_code < 0x10601)
5938 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5942 req->fid = cpu_to_le16(0xffff);
5943 resp = hwrm_req_hold(bp, req);
5944 rc = hwrm_req_send(bp, req);
5946 hwrm_req_drop(bp, req);
5950 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5951 if (BNXT_NEW_RM(bp)) {
5954 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5955 hw_resc->resv_hw_ring_grps =
5956 le32_to_cpu(resp->alloc_hw_ring_grps);
5957 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5958 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5959 stats = le16_to_cpu(resp->alloc_stat_ctx);
5960 hw_resc->resv_irqs = cp;
5961 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5962 int rx = hw_resc->resv_rx_rings;
5963 int tx = hw_resc->resv_tx_rings;
5965 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5967 if (cp < (rx + tx)) {
5968 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5969 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5971 hw_resc->resv_rx_rings = rx;
5972 hw_resc->resv_tx_rings = tx;
5974 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5975 hw_resc->resv_hw_ring_grps = rx;
5977 hw_resc->resv_cp_rings = cp;
5978 hw_resc->resv_stat_ctxs = stats;
5980 hwrm_req_drop(bp, req);
5984 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5986 struct hwrm_func_qcfg_output *resp;
5987 struct hwrm_func_qcfg_input *req;
5990 if (bp->hwrm_spec_code < 0x10601)
5993 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5997 req->fid = cpu_to_le16(fid);
5998 resp = hwrm_req_hold(bp, req);
5999 rc = hwrm_req_send(bp, req);
6001 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6003 hwrm_req_drop(bp, req);
6007 static bool bnxt_rfs_supported(struct bnxt *bp);
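/* Descriptive note (added): build (but do not send) a FUNC_CFG request
 * reserving the given PF ring and resource counts.  Callers either send
 * it as-is or add the resource-test flags to only validate availability.
 */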
6009 static struct hwrm_func_cfg_input *
6010 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6011 int ring_grps, int cp_rings, int stats, int vnics)
6013 struct hwrm_func_cfg_input *req;
6016 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6019 req->fid = cpu_to_le16(0xffff);
6020 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6021 req->num_tx_rings = cpu_to_le16(tx_rings);
6022 if (BNXT_NEW_RM(bp)) {
6023 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6024 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6025 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6026 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6027 enables |= tx_rings + ring_grps ?
6028 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6029 enables |= rx_rings ?
6030 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6032 enables |= cp_rings ?
6033 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6034 enables |= ring_grps ?
6035 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6036 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6038 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6040 req->num_rx_rings = cpu_to_le16(rx_rings);
6041 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6042 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6043 req->num_msix = cpu_to_le16(cp_rings);
6044 req->num_rsscos_ctxs =
6045 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6047 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6048 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6049 req->num_rsscos_ctxs = cpu_to_le16(1);
6050 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6051 bnxt_rfs_supported(bp))
6052 req->num_rsscos_ctxs =
6053 cpu_to_le16(ring_grps + 1);
6055 req->num_stat_ctxs = cpu_to_le16(stats);
6056 req->num_vnics = cpu_to_le16(vnics);
6058 req->enables = cpu_to_le32(enables);
6062 static struct hwrm_func_vf_cfg_input *
6063 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6064 int ring_grps, int cp_rings, int stats, int vnics)
6066 struct hwrm_func_vf_cfg_input *req;
6069 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6072 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6073 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6074 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6075 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6076 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6077 enables |= tx_rings + ring_grps ?
6078 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6080 enables |= cp_rings ?
6081 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6082 enables |= ring_grps ?
6083 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6085 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6086 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6088 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6089 req->num_tx_rings = cpu_to_le16(tx_rings);
6090 req->num_rx_rings = cpu_to_le16(rx_rings);
6091 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6092 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6093 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6095 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6096 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6097 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6099 req->num_stat_ctxs = cpu_to_le16(stats);
6100 req->num_vnics = cpu_to_le16(vnics);
6102 req->enables = cpu_to_le32(enables);
6107 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6108 int ring_grps, int cp_rings, int stats, int vnics)
6110 struct hwrm_func_cfg_input *req;
6113 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6114 cp_rings, stats, vnics);
6118 if (!req->enables) {
6119 hwrm_req_drop(bp, req);
6123 rc = hwrm_req_send(bp, req);
6127 if (bp->hwrm_spec_code < 0x10601)
6128 bp->hw_resc.resv_tx_rings = tx_rings;
6130 return bnxt_hwrm_get_rings(bp);
6134 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6135 int ring_grps, int cp_rings, int stats, int vnics)
6137 struct hwrm_func_vf_cfg_input *req;
6140 if (!BNXT_NEW_RM(bp)) {
6141 bp->hw_resc.resv_tx_rings = tx_rings;
6145 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6146 cp_rings, stats, vnics);
6150 rc = hwrm_req_send(bp, req);
6154 return bnxt_hwrm_get_rings(bp);
6157 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6158 int cp, int stat, int vnic)
6161 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6164 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6168 int bnxt_nq_rings_in_use(struct bnxt *bp)
6170 int cp = bp->cp_nr_rings;
6171 int ulp_msix, ulp_base;
6173 ulp_msix = bnxt_get_ulp_msix_num(bp);
6175 ulp_base = bnxt_get_ulp_msix_base(bp);
6177 if ((ulp_base + ulp_msix) > cp)
6178 cp = ulp_base + ulp_msix;
6183 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6187 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6188 return bnxt_nq_rings_in_use(bp);
6190 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6194 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6196 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6197 int cp = bp->cp_nr_rings;
6202 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6203 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6205 return cp + ulp_stat;
6208 /* Check if a default RSS map needs to be set up. This function is only
6209 * used on older firmware that does not require reserving RX rings.
6210 */
6211 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6213 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6215 /* The RSS map is valid for RX rings set to resv_rx_rings */
6216 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6217 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6218 if (!netif_is_rxfh_configured(bp->dev))
6219 bnxt_set_dflt_rss_indir_tbl(bp);
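/* Descriptive note (added): return true if the currently reserved
 * resources no longer match what the driver needs and a new reservation
 * round is required.
 */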
6223 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6225 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6226 int cp = bnxt_cp_rings_in_use(bp);
6227 int nq = bnxt_nq_rings_in_use(bp);
6228 int rx = bp->rx_nr_rings, stat;
6229 int vnic = 1, grp = rx;
6231 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6232 bp->hwrm_spec_code >= 0x10601)
6235 /* Old firmware does not need RX ring reservations but we still
6236 * need to set up a default RSS map when needed. With new firmware
6237 * we go through RX ring reservations first and then set up the
6238 * RSS map for the successfully reserved RX rings when needed.
6239 */
6240 if (!BNXT_NEW_RM(bp)) {
6241 bnxt_check_rss_tbl_no_rmgr(bp);
6244 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6246 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6248 stat = bnxt_get_func_stat_ctxs(bp);
6249 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6250 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6251 (hw_resc->resv_hw_ring_grps != grp &&
6252 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6254 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6255 hw_resc->resv_irqs != nq)
6260 static int __bnxt_reserve_rings(struct bnxt *bp)
6262 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6263 int cp = bnxt_nq_rings_in_use(bp);
6264 int tx = bp->tx_nr_rings;
6265 int rx = bp->rx_nr_rings;
6266 int grp, rx_rings, rc;
6270 if (!bnxt_need_reserve_rings(bp))
6273 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6275 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6277 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6279 grp = bp->rx_nr_rings;
6280 stat = bnxt_get_func_stat_ctxs(bp);
6282 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6286 tx = hw_resc->resv_tx_rings;
6287 if (BNXT_NEW_RM(bp)) {
6288 rx = hw_resc->resv_rx_rings;
6289 cp = hw_resc->resv_irqs;
6290 grp = hw_resc->resv_hw_ring_grps;
6291 vnic = hw_resc->resv_vnics;
6292 stat = hw_resc->resv_stat_ctxs;
6296 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6300 if (netif_running(bp->dev))
6303 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6304 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6305 bp->dev->hw_features &= ~NETIF_F_LRO;
6306 bp->dev->features &= ~NETIF_F_LRO;
6307 bnxt_set_ring_params(bp);
6310 rx_rings = min_t(int, rx_rings, grp);
6311 cp = min_t(int, cp, bp->cp_nr_rings);
6312 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6313 stat -= bnxt_get_ulp_stat_ctxs(bp);
6314 cp = min_t(int, cp, stat);
6315 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6316 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6318 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6319 bp->tx_nr_rings = tx;
6321 /* If we cannot reserve all the RX rings, reset the RSS map only
6322 * if absolutely necessary.
6323 */
6324 if (rx_rings != bp->rx_nr_rings) {
6325 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6326 rx_rings, bp->rx_nr_rings);
6327 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6328 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6329 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6330 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6331 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6332 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6335 bp->rx_nr_rings = rx_rings;
6336 bp->cp_nr_rings = cp;
6338 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6341 if (!netif_is_rxfh_configured(bp->dev))
6342 bnxt_set_dflt_rss_indir_tbl(bp);
6347 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6348 int ring_grps, int cp_rings, int stats,
6351 struct hwrm_func_vf_cfg_input *req;
6354 if (!BNXT_NEW_RM(bp))
6357 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6358 cp_rings, stats, vnics);
6359 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6360 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6361 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6362 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6363 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6364 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6365 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6366 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6368 req->flags = cpu_to_le32(flags);
6369 return hwrm_req_send_silent(bp, req);
6372 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6373 int ring_grps, int cp_rings, int stats,
6376 struct hwrm_func_cfg_input *req;
6379 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6380 cp_rings, stats, vnics);
6381 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6382 if (BNXT_NEW_RM(bp)) {
6383 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6384 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6385 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6386 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6387 if (bp->flags & BNXT_FLAG_CHIP_P5)
6388 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6389 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6391 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6394 req->flags = cpu_to_le32(flags);
6395 return hwrm_req_send_silent(bp, req);
6398 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6399 int ring_grps, int cp_rings, int stats,
6402 if (bp->hwrm_spec_code < 0x10801)
6406 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6407 ring_grps, cp_rings, stats,
6410 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6411 cp_rings, stats, vnics);
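/* Descriptive note (added): query the interrupt coalescing limits
 * supported by the firmware.  The legacy defaults set below are kept when
 * HWRM_RING_AGGINT_QCAPS is not available.
 */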
6414 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6416 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6417 struct hwrm_ring_aggint_qcaps_output *resp;
6418 struct hwrm_ring_aggint_qcaps_input *req;
6421 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6422 coal_cap->num_cmpl_dma_aggr_max = 63;
6423 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6424 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6425 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6426 coal_cap->int_lat_tmr_min_max = 65535;
6427 coal_cap->int_lat_tmr_max_max = 65535;
6428 coal_cap->num_cmpl_aggr_int_max = 65535;
6429 coal_cap->timer_units = 80;
6431 if (bp->hwrm_spec_code < 0x10902)
6434 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6437 resp = hwrm_req_hold(bp, req);
6438 rc = hwrm_req_send_silent(bp, req);
6440 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6441 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6442 coal_cap->num_cmpl_dma_aggr_max =
6443 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6444 coal_cap->num_cmpl_dma_aggr_during_int_max =
6445 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6446 coal_cap->cmpl_aggr_dma_tmr_max =
6447 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6448 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6449 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6450 coal_cap->int_lat_tmr_min_max =
6451 le16_to_cpu(resp->int_lat_tmr_min_max);
6452 coal_cap->int_lat_tmr_max_max =
6453 le16_to_cpu(resp->int_lat_tmr_max_max);
6454 coal_cap->num_cmpl_aggr_int_max =
6455 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6456 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6458 hwrm_req_drop(bp, req);
6461 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6463 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
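	/* Convert microseconds to hardware timer units; timer_units is in
	 * nanoseconds per unit, e.g. with the default of 80, 25 usec becomes
	 * 25 * 1000 / 80 = 312 units (illustrative values).
	 */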
6465 return usec * 1000 / coal_cap->timer_units;
6468 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6469 struct bnxt_coal *hw_coal,
6470 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6472 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6473 u32 cmpl_params = coal_cap->cmpl_params;
6474 u16 val, tmr, max, flags = 0;
6476 max = hw_coal->bufs_per_record * 128;
6477 if (hw_coal->budget)
6478 max = hw_coal->bufs_per_record * hw_coal->budget;
6479 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6481 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6482 req->num_cmpl_aggr_int = cpu_to_le16(val);
6484 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6485 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6487 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6488 coal_cap->num_cmpl_dma_aggr_during_int_max);
6489 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6491 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6492 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6493 req->int_lat_tmr_max = cpu_to_le16(tmr);
6495 /* min timer set to 1/2 of interrupt timer */
6496 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6498 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6499 req->int_lat_tmr_min = cpu_to_le16(val);
6500 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6503 /* buf timer set to 1/4 of interrupt timer */
6504 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6505 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6508 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6509 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6510 val = clamp_t(u16, tmr, 1,
6511 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6512 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6514 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6517 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6518 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6519 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6520 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6521 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6522 req->flags = cpu_to_le16(flags);
6523 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6526 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6527 struct bnxt_coal *hw_coal)
6529 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6530 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6531 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6532 u32 nq_params = coal_cap->nq_params;
6536 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6539 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6543 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6545 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6547 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6548 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6549 req->int_lat_tmr_min = cpu_to_le16(tmr);
6550 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6551 return hwrm_req_send(bp, req);
6554 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6556 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6557 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6558 struct bnxt_coal coal;
6561 /* Tick values in microseconds.
6562 * 1 coal_buf x bufs_per_record = 1 completion record.
6563 */
6564 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6566 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6567 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6569 if (!bnapi->rx_ring)
6572 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6576 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6578 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6580 return hwrm_req_send(bp, req_rx);
6583 int bnxt_hwrm_set_coal(struct bnxt *bp)
6585 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6589 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6593 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6595 hwrm_req_drop(bp, req_rx);
6599 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6600 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6602 hwrm_req_hold(bp, req_rx);
6603 hwrm_req_hold(bp, req_tx);
6604 for (i = 0; i < bp->cp_nr_rings; i++) {
6605 struct bnxt_napi *bnapi = bp->bnapi[i];
6606 struct bnxt_coal *hw_coal;
6610 if (!bnapi->rx_ring) {
6611 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6614 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6616 req->ring_id = cpu_to_le16(ring_id);
6618 rc = hwrm_req_send(bp, req);
6622 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6625 if (bnapi->rx_ring && bnapi->tx_ring) {
6627 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6628 req->ring_id = cpu_to_le16(ring_id);
6629 rc = hwrm_req_send(bp, req);
6634 hw_coal = &bp->rx_coal;
6636 hw_coal = &bp->tx_coal;
6637 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6639 hwrm_req_drop(bp, req_rx);
6640 hwrm_req_drop(bp, req_tx);
6644 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6646 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6647 struct hwrm_stat_ctx_free_input *req;
6653 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6656 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6658 if (BNXT_FW_MAJ(bp) <= 20) {
6659 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6660 hwrm_req_drop(bp, req);
6663 hwrm_req_hold(bp, req0);
6665 hwrm_req_hold(bp, req);
6666 for (i = 0; i < bp->cp_nr_rings; i++) {
6667 struct bnxt_napi *bnapi = bp->bnapi[i];
6668 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6670 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6671 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6673 req0->stat_ctx_id = req->stat_ctx_id;
6674 hwrm_req_send(bp, req0);
6676 hwrm_req_send(bp, req);
6678 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6681 hwrm_req_drop(bp, req);
6683 hwrm_req_drop(bp, req0);
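/* Descriptive note (added): allocate one firmware statistics context per
 * completion ring and save the returned context ids for the rings and
 * ring groups.
 */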
6686 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6688 struct hwrm_stat_ctx_alloc_output *resp;
6689 struct hwrm_stat_ctx_alloc_input *req;
6692 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6695 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6699 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6700 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6702 resp = hwrm_req_hold(bp, req);
6703 for (i = 0; i < bp->cp_nr_rings; i++) {
6704 struct bnxt_napi *bnapi = bp->bnapi[i];
6705 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6707 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6709 rc = hwrm_req_send(bp, req);
6713 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6715 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6717 hwrm_req_drop(bp, req);
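/* Descriptive note (added): query the function's current configuration:
 * VF VLAN, DCBX/LLDP agent flags, port partition type, bridge mode,
 * maximum MTU and, on P5 chips, the usable doorbell BAR size.
 */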
6721 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6723 struct hwrm_func_qcfg_output *resp;
6724 struct hwrm_func_qcfg_input *req;
6725 u32 min_db_offset = 0;
6729 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6733 req->fid = cpu_to_le16(0xffff);
6734 resp = hwrm_req_hold(bp, req);
6735 rc = hwrm_req_send(bp, req);
6737 goto func_qcfg_exit;
6739 #ifdef CONFIG_BNXT_SRIOV
6741 struct bnxt_vf_info *vf = &bp->vf;
6743 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6745 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6748 flags = le16_to_cpu(resp->flags);
6749 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6750 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6751 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6752 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6753 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6755 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6756 bp->flags |= BNXT_FLAG_MULTI_HOST;
6757 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6758 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6760 switch (resp->port_partition_type) {
6761 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6762 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6763 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6764 bp->port_partition_type = resp->port_partition_type;
6767 if (bp->hwrm_spec_code < 0x10707 ||
6768 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6769 bp->br_mode = BRIDGE_MODE_VEB;
6770 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6771 bp->br_mode = BRIDGE_MODE_VEPA;
6773 bp->br_mode = BRIDGE_MODE_UNDEF;
6775 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6777 bp->max_mtu = BNXT_MAX_MTU;
6780 goto func_qcfg_exit;
6782 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6784 min_db_offset = DB_PF_OFFSET_P5;
6786 min_db_offset = DB_VF_OFFSET_P5;
6788 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6790 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6791 bp->db_size <= min_db_offset)
6792 bp->db_size = pci_resource_len(bp->pdev, 2);
6795 hwrm_req_drop(bp, req);
6799 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6800 struct hwrm_func_backing_store_qcaps_output *resp)
6802 struct bnxt_mem_init *mem_init;
6808 init_val = resp->ctx_kind_initializer;
6809 init_mask = le16_to_cpu(resp->ctx_init_mask);
6810 offset = &resp->qp_init_offset;
6811 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6812 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6813 mem_init->init_val = init_val;
6814 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6817 if (i == BNXT_CTX_MEM_INIT_STAT)
6818 offset = &resp->stat_init_offset;
6819 if (init_mask & (1 << i))
6820 mem_init->offset = *offset * 4;
6822 mem_init->init_val = 0;
6824 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6825 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6826 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6827 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6828 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6829 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
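/* Query backing-store capabilities (entry sizes, entry count limits, TQM
 * ring count and context initializers) and cache them in bp->ctx for
 * bnxt_alloc_ctx_mem().
 */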
6832 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6834 struct hwrm_func_backing_store_qcaps_output *resp;
6835 struct hwrm_func_backing_store_qcaps_input *req;
6838 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6841 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6845 resp = hwrm_req_hold(bp, req);
6846 rc = hwrm_req_send_silent(bp, req);
6848 struct bnxt_ctx_pg_info *ctx_pg;
6849 struct bnxt_ctx_mem_info *ctx;
6852 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6857 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6858 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6859 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6860 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6861 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6862 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6863 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6864 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6865 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6866 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6867 ctx->vnic_max_vnic_entries =
6868 le16_to_cpu(resp->vnic_max_vnic_entries);
6869 ctx->vnic_max_ring_table_entries =
6870 le16_to_cpu(resp->vnic_max_ring_table_entries);
6871 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6872 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6873 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6874 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6875 ctx->tqm_min_entries_per_ring =
6876 le32_to_cpu(resp->tqm_min_entries_per_ring);
6877 ctx->tqm_max_entries_per_ring =
6878 le32_to_cpu(resp->tqm_max_entries_per_ring);
6879 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6880 if (!ctx->tqm_entries_multiple)
6881 ctx->tqm_entries_multiple = 1;
6882 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6883 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6884 ctx->mrav_num_entries_units =
6885 le16_to_cpu(resp->mrav_num_entries_units);
6886 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6887 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6889 bnxt_init_ctx_initializer(ctx, resp);
6891 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6892 if (!ctx->tqm_fp_rings_count)
6893 ctx->tqm_fp_rings_count = bp->max_q;
6894 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6895 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6897 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6898 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6904 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6905 ctx->tqm_mem[i] = ctx_pg;
6911 hwrm_req_drop(bp, req);
6915 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
	if (!rmem->nr_pages)
		return;

	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}
6933 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6934 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6935 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6936 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6937 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6938 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
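/* Hand the firmware the geometry (entry counts, entry sizes, page level and
 * page-directory address) of every backing-store region selected in
 * @enables.  The legacy request length is used when the extended request is
 * not supported.
 */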
6940 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6942 struct hwrm_func_backing_store_cfg_input *req;
6943 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6944 struct bnxt_ctx_pg_info *ctx_pg;
6945 void **__req = (void **)&req;
6946 u32 req_len = sizeof(*req);
6947 __le32 *num_entries;
6958 if (req_len > bp->hwrm_max_ext_req_len)
6959 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
6960 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
6964 req->enables = cpu_to_le32(enables);
6965 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6966 ctx_pg = &ctx->qp_mem;
6967 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
6968 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6969 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6970 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6971 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6972 &req->qpc_pg_size_qpc_lvl,
6973 &req->qpc_page_dir);
6975 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6976 ctx_pg = &ctx->srq_mem;
6977 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
6978 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6979 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6980 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6981 &req->srq_pg_size_srq_lvl,
6982 &req->srq_page_dir);
6984 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6985 ctx_pg = &ctx->cq_mem;
6986 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
6987 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6988 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6989 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6990 &req->cq_pg_size_cq_lvl,
6993 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6994 ctx_pg = &ctx->vnic_mem;
6995 req->vnic_num_vnic_entries =
6996 cpu_to_le16(ctx->vnic_max_vnic_entries);
6997 req->vnic_num_ring_table_entries =
6998 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6999 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7000 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7001 &req->vnic_pg_size_vnic_lvl,
7002 &req->vnic_page_dir);
7004 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7005 ctx_pg = &ctx->stat_mem;
7006 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7007 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7008 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7009 &req->stat_pg_size_stat_lvl,
7010 &req->stat_page_dir);
7012 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7013 ctx_pg = &ctx->mrav_mem;
7014 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7015 if (ctx->mrav_num_entries_units)
7017 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7018 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7019 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7020 &req->mrav_pg_size_mrav_lvl,
7021 &req->mrav_page_dir);
7023 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7024 ctx_pg = &ctx->tim_mem;
7025 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7026 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7027 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7028 &req->tim_pg_size_tim_lvl,
7029 &req->tim_page_dir);
7031 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7032 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7033 pg_dir = &req->tqm_sp_page_dir,
7034 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7035 i < BNXT_MAX_TQM_RINGS;
7036 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7037 if (!(enables & ena))
7040 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7041 ctx_pg = ctx->tqm_mem[i];
7042 *num_entries = cpu_to_le32(ctx_pg->entries);
7043 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7045 req->flags = cpu_to_le32(flags);
7046 return hwrm_req_send(bp, req);
7049 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7050 struct bnxt_ctx_pg_info *ctx_pg)
7052 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7054 rmem->page_size = BNXT_PAGE_SIZE;
7055 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7056 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7057 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7058 if (rmem->depth >= 1)
7059 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7060 return bnxt_alloc_ring(bp, rmem);
7063 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7064 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7065 u8 depth, struct bnxt_mem_init *mem_init)
7067 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7073 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7074 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7075 ctx_pg->nr_pages = 0;
7078 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7082 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7084 if (!ctx_pg->ctx_pg_tbl)
7086 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7087 rmem->nr_pages = nr_tbls;
7088 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7091 for (i = 0; i < nr_tbls; i++) {
7092 struct bnxt_ctx_pg_info *pg_tbl;
7094 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7097 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7098 rmem = &pg_tbl->ring_mem;
7099 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7100 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7102 rmem->nr_pages = MAX_CTX_PAGES;
7103 rmem->mem_init = mem_init;
7104 if (i == (nr_tbls - 1)) {
7105 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7108 rmem->nr_pages = rem;
7110 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7115 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7116 if (rmem->nr_pages > 1 || depth)
7118 rmem->mem_init = mem_init;
7119 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7124 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7125 struct bnxt_ctx_pg_info *ctx_pg)
7127 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7129 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7130 ctx_pg->ctx_pg_tbl) {
7131 int i, nr_tbls = rmem->nr_pages;
7133 for (i = 0; i < nr_tbls; i++) {
7134 struct bnxt_ctx_pg_info *pg_tbl;
7135 struct bnxt_ring_mem_info *rmem2;
7137 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7140 rmem2 = &pg_tbl->ring_mem;
7141 bnxt_free_ring(bp, rmem2);
7142 ctx_pg->ctx_pg_arr[i] = NULL;
7144 ctx_pg->ctx_pg_tbl[i] = NULL;
7146 kfree(ctx_pg->ctx_pg_tbl);
7147 ctx_pg->ctx_pg_tbl = NULL;
7149 bnxt_free_ring(bp, rmem);
7150 ctx_pg->nr_pages = 0;
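/* Free all backing-store page tables and data pages: the TQM rings first,
 * then the TIM, MRAV, STAT, VNIC, CQ, SRQ and QP regions.
 */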
7153 static void bnxt_free_ctx_mem(struct bnxt *bp)
7155 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7161 if (ctx->tqm_mem[0]) {
7162 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7163 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7164 kfree(ctx->tqm_mem[0]);
7165 ctx->tqm_mem[0] = NULL;
7168 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7169 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7170 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7171 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7172 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7173 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7174 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7175 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
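/* Size and allocate every backing-store region from the queried limits and
 * then program the firmware with the result.  RoCE-capable devices (outside
 * kdump) reserve extra QP/SRQ entries plus the MRAV and TIM regions.
 */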
7178 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7180 struct bnxt_ctx_pg_info *ctx_pg;
7181 struct bnxt_ctx_mem_info *ctx;
7182 struct bnxt_mem_init *init;
7183 u32 mem_size, ena, entries;
7184 u32 entries_sp, min;
7191 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7193 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7198 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7201 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7207 ctx_pg = &ctx->qp_mem;
7208 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7210 if (ctx->qp_entry_size) {
7211 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7212 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7213 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7218 ctx_pg = &ctx->srq_mem;
7219 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7220 if (ctx->srq_entry_size) {
7221 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7222 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7223 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7228 ctx_pg = &ctx->cq_mem;
7229 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7230 if (ctx->cq_entry_size) {
7231 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7232 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7233 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7238 ctx_pg = &ctx->vnic_mem;
7239 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7240 ctx->vnic_max_ring_table_entries;
7241 if (ctx->vnic_entry_size) {
7242 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7243 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7244 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7249 ctx_pg = &ctx->stat_mem;
7250 ctx_pg->entries = ctx->stat_max_entries;
7251 if (ctx->stat_entry_size) {
7252 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7253 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7254 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7260 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7263 ctx_pg = &ctx->mrav_mem;
7264 /* 128K extra is needed to accommodate static AH context
7265 * allocation by f/w.
7267 num_mr = 1024 * 256;
7268 num_ah = 1024 * 128;
7269 ctx_pg->entries = num_mr + num_ah;
7270 if (ctx->mrav_entry_size) {
7271 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7272 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7273 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7277 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7278 if (ctx->mrav_num_entries_units)
7280 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7281 (num_ah / ctx->mrav_num_entries_units);
7283 ctx_pg = &ctx->tim_mem;
7284 ctx_pg->entries = ctx->qp_mem.entries;
7285 if (ctx->tim_entry_size) {
7286 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7287 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7291 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7294 min = ctx->tqm_min_entries_per_ring;
7295 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7296 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7297 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7298 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7299 entries = roundup(entries, ctx->tqm_entries_multiple);
7300 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7301 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7302 ctx_pg = ctx->tqm_mem[i];
7303 ctx_pg->entries = i ? entries : entries_sp;
7304 if (ctx->tqm_entry_size) {
7305 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7306 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7311 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7313 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7314 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7316 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7320 ctx->flags |= BNXT_CTX_FLAG_INITED;
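/* Query per-function min/max resource limits (rings, ring groups, stat
 * contexts, VNICs, L2 contexts).  On P5 chips the MSI-X limit also bounds
 * the number of NQs.
 */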
7324 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7326 struct hwrm_func_resource_qcaps_output *resp;
7327 struct hwrm_func_resource_qcaps_input *req;
7328 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7331 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7335 req->fid = cpu_to_le16(0xffff);
7336 resp = hwrm_req_hold(bp, req);
7337 rc = hwrm_req_send_silent(bp, req);
7339 goto hwrm_func_resc_qcaps_exit;
7341 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7343 goto hwrm_func_resc_qcaps_exit;
7345 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7346 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7347 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7348 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7349 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7350 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7351 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7352 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7353 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7354 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7355 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7356 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7357 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7358 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7359 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7360 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7362 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7363 u16 max_msix = le16_to_cpu(resp->max_msix);
7365 hw_resc->max_nqs = max_msix;
7366 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7370 struct bnxt_pf_info *pf = &bp->pf;
7372 pf->vf_resv_strategy =
7373 le16_to_cpu(resp->vf_reservation_strategy);
7374 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7375 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7377 hwrm_func_resc_qcaps_exit:
7378 hwrm_req_drop(bp, req);
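/* Query PTP/PHC support for the port; if the timestamp registers are
 * accessible, allocate bp->ptp_cfg and initialize the PTP clock.
 */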
7382 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7384 struct hwrm_port_mac_ptp_qcfg_output *resp;
7385 struct hwrm_port_mac_ptp_qcfg_input *req;
7386 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7390 if (bp->hwrm_spec_code < 0x10801) {
7395 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7399 req->port_id = cpu_to_le16(bp->pf.port_id);
7400 resp = hwrm_req_hold(bp, req);
7401 rc = hwrm_req_send(bp, req);
7405 flags = resp->flags;
7406 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7411 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7419 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7420 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7421 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7422 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7423 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7424 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7429 rc = bnxt_ptp_init(bp);
7431 netdev_warn(bp->dev, "PTP initialization failed.\n");
7433 hwrm_req_drop(bp, req);
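/* Query function capabilities and translate them into bp->flags and
 * bp->fw_cap bits (RoCE, error recovery, PTP, WoL, TX push, ...), then
 * cache the PF/VF MAC address and the absolute resource maxima.
 */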
7444 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7446 struct hwrm_func_qcaps_output *resp;
7447 struct hwrm_func_qcaps_input *req;
7448 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7449 u32 flags, flags_ext;
7452 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7456 req->fid = cpu_to_le16(0xffff);
7457 resp = hwrm_req_hold(bp, req);
7458 rc = hwrm_req_send(bp, req);
7460 goto hwrm_func_qcaps_exit;
7462 flags = le32_to_cpu(resp->flags);
7463 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7464 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7465 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7466 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7467 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7468 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7469 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7470 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7471 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7472 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7473 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7474 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7475 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7476 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7477 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7478 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7480 flags_ext = le32_to_cpu(resp->flags_ext);
7481 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7482 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7483 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7484 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7486 bp->tx_push_thresh = 0;
7487 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7488 BNXT_FW_MAJ(bp) > 217)
7489 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7491 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7492 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7493 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7494 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7495 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7496 if (!hw_resc->max_hw_ring_grps)
7497 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7498 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7499 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7500 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7503 struct bnxt_pf_info *pf = &bp->pf;
7505 pf->fw_fid = le16_to_cpu(resp->fid);
7506 pf->port_id = le16_to_cpu(resp->port_id);
7507 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7508 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7509 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7510 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7511 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7512 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7513 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7514 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7515 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7516 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7517 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7518 bp->flags |= BNXT_FLAG_WOL_CAP;
7519 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7520 __bnxt_hwrm_ptp_qcfg(bp);
7527 #ifdef CONFIG_BNXT_SRIOV
7528 struct bnxt_vf_info *vf = &bp->vf;
7530 vf->fw_fid = le16_to_cpu(resp->fid);
7531 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7535 hwrm_func_qcaps_exit:
7536 hwrm_req_drop(bp, req);
7540 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7542 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7546 rc = __bnxt_hwrm_func_qcaps(bp);
7549 rc = bnxt_hwrm_queue_qportcfg(bp);
7551 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7554 if (bp->hwrm_spec_code >= 0x10803) {
7555 rc = bnxt_alloc_ctx_mem(bp);
7558 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7560 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7565 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7567 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7568 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7572 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7575 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7579 resp = hwrm_req_hold(bp, req);
7580 rc = hwrm_req_send(bp, req);
7582 goto hwrm_cfa_adv_qcaps_exit;
	flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7589 hwrm_cfa_adv_qcaps_exit:
7590 hwrm_req_drop(bp, req);
7594 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7599 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7606 static int bnxt_alloc_fw_health(struct bnxt *bp)
7610 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7611 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7614 rc = __bnxt_alloc_fw_health(bp);
7616 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7617 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7624 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7626 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7627 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7628 BNXT_FW_HEALTH_WIN_MAP_OFF);
7631 bool bnxt_is_fw_healthy(struct bnxt *bp)
{
	if (bp->fw_health && bp->fw_health->status_reliable) {
		u32 fw_status;

		fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
			return false;
	}
	return true;
}
7644 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7646 struct bnxt_fw_health *fw_health = bp->fw_health;
7649 if (!fw_health || !fw_health->status_reliable)
7652 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7653 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7654 fw_health->status_reliable = false;
7657 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7665 bp->fw_health->status_reliable = false;
7667 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7668 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7670 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7671 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7672 if (!bp->chip_num) {
7673 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7674 bp->chip_num = readl(bp->bar0 +
7675 BNXT_FW_HEALTH_WIN_BASE +
7676 BNXT_GRC_REG_CHIP_NUM);
		}
		if (!BNXT_CHIP_P5(bp))
			return;

		status_loc = BNXT_GRC_REG_STATUS_P5 |
			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
	} else {
		status_loc = readl(hs + offsetof(struct hcomm_status,
						 fw_status_loc));
	}
7688 if (__bnxt_alloc_fw_health(bp)) {
7689 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7693 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7694 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7695 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7696 __bnxt_map_fw_health_reg(bp, status_loc);
7697 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7698 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7701 bp->fw_health->status_reliable = true;
7704 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7706 struct bnxt_fw_health *fw_health = bp->fw_health;
7707 u32 reg_base = 0xffffffff;
7710 bp->fw_health->status_reliable = false;
7711 /* Only pre-map the monitoring GRC registers using window 3 */
7712 for (i = 0; i < 4; i++) {
7713 u32 reg = fw_health->regs[i];
7715 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7717 if (reg_base == 0xffffffff)
7718 reg_base = reg & BNXT_GRC_BASE_MASK;
7719 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7721 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7723 bp->fw_health->status_reliable = true;
7724 if (reg_base == 0xffffffff)
7727 __bnxt_map_fw_health_reg(bp, reg_base);
7731 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7733 struct bnxt_fw_health *fw_health = bp->fw_health;
7734 struct hwrm_error_recovery_qcfg_output *resp;
7735 struct hwrm_error_recovery_qcfg_input *req;
7738 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7741 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7745 resp = hwrm_req_hold(bp, req);
7746 rc = hwrm_req_send(bp, req);
7748 goto err_recovery_out;
7749 fw_health->flags = le32_to_cpu(resp->flags);
7750 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7751 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7753 goto err_recovery_out;
7755 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7756 fw_health->master_func_wait_dsecs =
7757 le32_to_cpu(resp->master_func_wait_period);
7758 fw_health->normal_func_wait_dsecs =
7759 le32_to_cpu(resp->normal_func_wait_period);
7760 fw_health->post_reset_wait_dsecs =
7761 le32_to_cpu(resp->master_func_wait_period_after_reset);
7762 fw_health->post_reset_max_wait_dsecs =
7763 le32_to_cpu(resp->max_bailout_time_after_reset);
7764 fw_health->regs[BNXT_FW_HEALTH_REG] =
7765 le32_to_cpu(resp->fw_health_status_reg);
7766 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7767 le32_to_cpu(resp->fw_heartbeat_reg);
7768 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7769 le32_to_cpu(resp->fw_reset_cnt_reg);
7770 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7771 le32_to_cpu(resp->reset_inprogress_reg);
7772 fw_health->fw_reset_inprog_reg_mask =
7773 le32_to_cpu(resp->reset_inprogress_reg_mask);
7774 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7775 if (fw_health->fw_reset_seq_cnt >= 16) {
7777 goto err_recovery_out;
7779 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7780 fw_health->fw_reset_seq_regs[i] =
7781 le32_to_cpu(resp->reset_reg[i]);
7782 fw_health->fw_reset_seq_vals[i] =
7783 le32_to_cpu(resp->reset_reg_val[i]);
7784 fw_health->fw_reset_seq_delay_msec[i] =
7785 resp->delay_after_reset[i];
7788 hwrm_req_drop(bp, req);
7790 rc = bnxt_map_fw_health_regs(bp);
7792 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
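/* Request a function-level reset from the firmware.  This can take longer
 * than a normal HWRM command, so the larger HWRM_RESET_TIMEOUT is used.
 */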
7796 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7798 struct hwrm_func_reset_input *req;
7801 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7806 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7807 return hwrm_req_send(bp, req);
7810 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7812 struct hwrm_nvm_get_dev_info_output nvm_info;
7814 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7815 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7816 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7817 nvm_info.nvm_cfg_ver_upd);
7820 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7822 struct hwrm_queue_qportcfg_output *resp;
7823 struct hwrm_queue_qportcfg_input *req;
7828 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7832 resp = hwrm_req_hold(bp, req);
7833 rc = hwrm_req_send(bp, req);
7837 if (!resp->max_configurable_queues) {
7841 bp->max_tc = resp->max_configurable_queues;
7842 bp->max_lltc = resp->max_configurable_lossless_queues;
7843 if (bp->max_tc > BNXT_MAX_QUEUE)
7844 bp->max_tc = BNXT_MAX_QUEUE;
7846 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7847 qptr = &resp->queue_id0;
7848 for (i = 0, j = 0; i < bp->max_tc; i++) {
7849 bp->q_info[j].queue_id = *qptr;
7850 bp->q_ids[i] = *qptr++;
7851 bp->q_info[j].queue_profile = *qptr++;
7852 bp->tc_to_qidx[j] = j;
7853 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7854 (no_rdma && BNXT_PF(bp)))
7857 bp->max_q = bp->max_tc;
7858 bp->max_tc = max_t(u8, j, 1);
7860 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7863 if (bp->max_lltc > bp->max_tc)
7864 bp->max_lltc = bp->max_tc;
7867 hwrm_req_drop(bp, req);
7871 static int bnxt_hwrm_poll(struct bnxt *bp)
7873 struct hwrm_ver_get_input *req;
7876 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7880 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7881 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7882 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7884 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7885 rc = hwrm_req_send(bp, req);
7889 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7891 struct hwrm_ver_get_output *resp;
7892 struct hwrm_ver_get_input *req;
7893 u16 fw_maj, fw_min, fw_bld, fw_rsv;
7894 u32 dev_caps_cfg, hwrm_ver;
7897 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7901 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
7902 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7903 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7904 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7905 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7907 resp = hwrm_req_hold(bp, req);
7908 rc = hwrm_req_send(bp, req);
7910 goto hwrm_ver_get_exit;
7912 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7914 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7915 resp->hwrm_intf_min_8b << 8 |
7916 resp->hwrm_intf_upd_8b;
7917 if (resp->hwrm_intf_maj_8b < 1) {
7918 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7919 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7920 resp->hwrm_intf_upd_8b);
7921 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7924 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7925 HWRM_VERSION_UPDATE;
7927 if (bp->hwrm_spec_code > hwrm_ver)
7928 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7929 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7930 HWRM_VERSION_UPDATE);
7932 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7933 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7934 resp->hwrm_intf_upd_8b);
7936 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7937 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7938 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7939 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7940 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7941 len = FW_VER_STR_LEN;
7943 fw_maj = resp->hwrm_fw_maj_8b;
7944 fw_min = resp->hwrm_fw_min_8b;
7945 fw_bld = resp->hwrm_fw_bld_8b;
7946 fw_rsv = resp->hwrm_fw_rsvd_8b;
7947 len = BC_HWRM_STR_LEN;
7949 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7950 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7953 if (strlen(resp->active_pkg_name)) {
7954 int fw_ver_len = strlen(bp->fw_ver_str);
7956 snprintf(bp->fw_ver_str + fw_ver_len,
7957 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7958 resp->active_pkg_name);
7959 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7962 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7963 if (!bp->hwrm_cmd_timeout)
7964 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7966 if (resp->hwrm_intf_maj_8b >= 1) {
7967 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7968 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7970 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7971 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7973 bp->chip_num = le16_to_cpu(resp->chip_num);
7974 bp->chip_rev = resp->chip_rev;
7975 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7977 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7979 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7980 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7981 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7982 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8000 hwrm_req_drop(bp, req);
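/* Push the host wall-clock time to the firmware.  Skipped on firmware
 * older than spec 1.4.0, and on VFs older than spec 1.9.1.
 */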
8004 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8006 struct hwrm_fw_set_time_input *req;
8008 time64_t now = ktime_get_real_seconds();
8011 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8012 bp->hwrm_spec_code < 0x10400)
8015 time64_to_tm(now, 0, &tm);
8016 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8020 req->year = cpu_to_le16(1900 + tm.tm_year);
8021 req->month = 1 + tm.tm_mon;
8022 req->day = tm.tm_mday;
8023 req->hour = tm.tm_hour;
8024 req->minute = tm.tm_min;
8025 req->second = tm.tm_sec;
8026 return hwrm_req_send(bp, req);
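/* Fold a narrower hardware counter into a 64-bit software counter.  @mask
 * is the width of the hardware field; when the new hardware value is below
 * the previously folded-in value the counter has wrapped, so one full
 * period (mask + 1) is added.  Hypothetical 8-bit example (mask = 0xff):
 * sw = 0x1fe and a hardware read of 0x05 yields sw = 0x205.
 */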
8029 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
	u64 sw_tmp;

	hw &= mask;
	sw_tmp = (*sw & ~mask) | hw;
	if (hw < (*sw & mask))
		sw_tmp += mask + 1;
	WRITE_ONCE(*sw, sw_tmp);
}
8040 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8041 int count, bool ignore_zero)
8045 for (i = 0; i < count; i++) {
8046 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
		if (ignore_zero && !hw)
			continue;

		if (masks[i] == -1ULL)
			sw_stats[i] = hw;
		else
			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
	}
}
8058 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8060 if (!stats->hw_stats)
8063 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8064 stats->hw_masks, stats->len / 8, false);
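/* Accumulate ring, port and extended port statistics into the 64-bit
 * software counters.  On P5 chips a hardware bug can intermittently return
 * zero samples, so zeros are ignored when folding in the ring counters.
 */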
8067 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8069 struct bnxt_stats_mem *ring0_stats;
8070 bool ignore_zero = false;
8073 /* Chip bug. Counter intermittently becomes 0. */
8074 if (bp->flags & BNXT_FLAG_CHIP_P5)
8077 for (i = 0; i < bp->cp_nr_rings; i++) {
8078 struct bnxt_napi *bnapi = bp->bnapi[i];
8079 struct bnxt_cp_ring_info *cpr;
8080 struct bnxt_stats_mem *stats;
8082 cpr = &bnapi->cp_ring;
8083 stats = &cpr->stats;
8085 ring0_stats = stats;
8086 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8087 ring0_stats->hw_masks,
8088 ring0_stats->len / 8, ignore_zero);
8090 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8091 struct bnxt_stats_mem *stats = &bp->port_stats;
8092 __le64 *hw_stats = stats->hw_stats;
8093 u64 *sw_stats = stats->sw_stats;
8094 u64 *masks = stats->hw_masks;
8097 cnt = sizeof(struct rx_port_stats) / 8;
8098 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8100 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8101 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8102 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8103 cnt = sizeof(struct tx_port_stats) / 8;
8104 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8106 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8107 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8108 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8112 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8114 struct hwrm_port_qstats_input *req;
8115 struct bnxt_pf_info *pf = &bp->pf;
8118 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8121 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8124 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8129 req->port_id = cpu_to_le16(pf->port_id);
8130 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8131 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8132 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8133 return hwrm_req_send(bp, req);
8136 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8138 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8139 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8140 struct hwrm_port_qstats_ext_output *resp_qs;
8141 struct hwrm_port_qstats_ext_input *req_qs;
8142 struct bnxt_pf_info *pf = &bp->pf;
8146 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8149 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8152 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8156 req_qs->flags = flags;
8157 req_qs->port_id = cpu_to_le16(pf->port_id);
8158 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8159 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8160 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8161 sizeof(struct tx_port_stats_ext) : 0;
8162 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8163 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8164 resp_qs = hwrm_req_hold(bp, req_qs);
8165 rc = hwrm_req_send(bp, req_qs);
8167 bp->fw_rx_stats_ext_size =
8168 le16_to_cpu(resp_qs->rx_stat_size) / 8;
8169 bp->fw_tx_stats_ext_size = tx_stat_size ?
8170 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8172 bp->fw_rx_stats_ext_size = 0;
8173 bp->fw_tx_stats_ext_size = 0;
8175 hwrm_req_drop(bp, req_qs);
8180 if (bp->fw_tx_stats_ext_size <=
8181 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8182 bp->pri2cos_valid = 0;
8186 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8190 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8192 resp_qc = hwrm_req_hold(bp, req_qc);
8193 rc = hwrm_req_send(bp, req_qc);
8198 pri2cos = &resp_qc->pri0_cos_queue_id;
8199 for (i = 0; i < 8; i++) {
8200 u8 queue_id = pri2cos[i];
8203 /* Per port queue IDs start from 0, 10, 20, etc */
8204 queue_idx = queue_id % 10;
8205 if (queue_idx > BNXT_MAX_QUEUE) {
8206 bp->pri2cos_valid = false;
8207 hwrm_req_drop(bp, req_qc);
8210 for (j = 0; j < bp->max_q; j++) {
8211 if (bp->q_ids[j] == queue_id)
8212 bp->pri2cos_idx[i] = queue_idx;
8215 bp->pri2cos_valid = true;
8217 hwrm_req_drop(bp, req_qc);
8222 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8224 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8225 bnxt_hwrm_tunnel_dst_port_free(
8226 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8227 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8228 bnxt_hwrm_tunnel_dst_port_free(
8229 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8232 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8238 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8239 else if (BNXT_NO_FW_ACCESS(bp))
8241 for (i = 0; i < bp->nr_vnics; i++) {
8242 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8244 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8252 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8256 for (i = 0; i < bp->nr_vnics; i++)
8257 bnxt_hwrm_vnic_set_rss(bp, i, false);
8260 static void bnxt_clear_vnic(struct bnxt *bp)
8265 bnxt_hwrm_clear_vnic_filter(bp);
8266 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8267 /* clear all RSS setting before free vnic ctx */
8268 bnxt_hwrm_clear_vnic_rss(bp);
8269 bnxt_hwrm_vnic_ctx_free(bp);
8271 /* before free the vnic, undo the vnic tpa settings */
8272 if (bp->flags & BNXT_FLAG_TPA)
8273 bnxt_set_tpa(bp, false);
8274 bnxt_hwrm_vnic_free(bp);
8275 if (bp->flags & BNXT_FLAG_CHIP_P5)
8276 bnxt_hwrm_vnic_ctx_free(bp);
8279 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8282 bnxt_clear_vnic(bp);
8283 bnxt_hwrm_ring_free(bp, close_path);
8284 bnxt_hwrm_ring_grp_free(bp);
8286 bnxt_hwrm_stat_ctx_free(bp);
8287 bnxt_hwrm_free_tunnel_ports(bp);
8291 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8293 struct hwrm_func_cfg_input *req;
8297 if (br_mode == BRIDGE_MODE_VEB)
8298 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8299 else if (br_mode == BRIDGE_MODE_VEPA)
8300 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8304 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8308 req->fid = cpu_to_le16(0xffff);
8309 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8310 req->evb_mode = evb_mode;
8311 return hwrm_req_send(bp, req);
8314 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8316 struct hwrm_func_cfg_input *req;
8319 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8322 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8326 req->fid = cpu_to_le16(0xffff);
8327 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8328 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8330 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8332 return hwrm_req_send(bp, req);
8335 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8337 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8340 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8343 /* allocate context for vnic */
8344 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8346 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8348 goto vnic_setup_err;
8350 bp->rsscos_nr_ctxs++;
8352 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8353 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8355 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8357 goto vnic_setup_err;
8359 bp->rsscos_nr_ctxs++;
8363 /* configure default vnic, ring grp */
8364 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8366 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8368 goto vnic_setup_err;
8371 /* Enable RSS hashing on vnic */
8372 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8374 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8376 goto vnic_setup_err;
8379 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8380 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8382 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8391 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8395 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8396 for (i = 0; i < nr_ctxs; i++) {
8397 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8399 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8403 bp->rsscos_nr_ctxs++;
8408 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8410 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8414 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8416 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8420 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8421 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8423 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8430 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8432 if (bp->flags & BNXT_FLAG_CHIP_P5)
8433 return __bnxt_setup_vnic_p5(bp, vnic_id);
8435 return __bnxt_setup_vnic(bp, vnic_id);
8438 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8440 #ifdef CONFIG_RFS_ACCEL
8443 if (bp->flags & BNXT_FLAG_CHIP_P5)
8446 for (i = 0; i < bp->rx_nr_rings; i++) {
8447 struct bnxt_vnic_info *vnic;
8448 u16 vnic_id = i + 1;
8451 if (vnic_id >= bp->nr_vnics)
8454 vnic = &bp->vnic_info[vnic_id];
8455 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8456 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8457 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8458 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8460 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8464 rc = bnxt_setup_vnic(bp, vnic_id);
8474 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8475 static bool bnxt_promisc_ok(struct bnxt *bp)
8477 #ifdef CONFIG_BNXT_SRIOV
8478 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8484 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8486 unsigned int rc = 0;
8488 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8490 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8495 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8497 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8504 static int bnxt_cfg_rx_mode(struct bnxt *);
8505 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
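/* Chip bring-up path: allocate stat contexts, rings, ring groups and the
 * default VNIC, install the unicast MAC filter, set the RX mask and apply
 * interrupt coalescing.  Resources are freed again on any failure.
 */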
8507 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8509 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8511 unsigned int rx_nr_rings = bp->rx_nr_rings;
8514 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8516 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8522 rc = bnxt_hwrm_ring_alloc(bp);
8524 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8528 rc = bnxt_hwrm_ring_grp_alloc(bp);
8530 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8534 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8537 /* default vnic 0 */
8538 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8540 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8544 rc = bnxt_setup_vnic(bp, 0);
8548 if (bp->flags & BNXT_FLAG_RFS) {
8549 rc = bnxt_alloc_rfs_vnics(bp);
8554 if (bp->flags & BNXT_FLAG_TPA) {
8555 rc = bnxt_set_tpa(bp, true);
8561 bnxt_update_vf_mac(bp);
8563 /* Filter for default vnic 0 */
8564 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8566 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8569 vnic->uc_filter_count = 1;
8572 if (bp->dev->flags & IFF_BROADCAST)
8573 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8575 if (bp->dev->flags & IFF_PROMISC)
8576 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8578 if (bp->dev->flags & IFF_ALLMULTI) {
8579 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8580 vnic->mc_list_count = 0;
8584 bnxt_mc_list_updated(bp, &mask);
8585 vnic->rx_mask |= mask;
8588 rc = bnxt_cfg_rx_mode(bp);
8592 rc = bnxt_hwrm_set_coal(bp);
8594 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8597 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8598 rc = bnxt_setup_nitroa0_vnic(bp);
8600 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8605 bnxt_hwrm_func_qcfg(bp);
8606 netdev_update_features(bp->dev);
8612 bnxt_hwrm_resource_free(bp, 0, true);
8617 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8619 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8623 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8625 bnxt_init_cp_rings(bp);
8626 bnxt_init_rx_rings(bp);
8627 bnxt_init_tx_rings(bp);
8628 bnxt_init_ring_grps(bp, irq_re_init);
8629 bnxt_init_vnics(bp);
8631 return bnxt_init_chip(bp, irq_re_init);
8634 static int bnxt_set_real_num_queues(struct bnxt *bp)
8637 struct net_device *dev = bp->dev;
8639 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8640 bp->tx_nr_rings_xdp);
8644 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8648 #ifdef CONFIG_RFS_ACCEL
8649 if (bp->flags & BNXT_FLAG_RFS)
8650 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}
8680 static void bnxt_setup_msix(struct bnxt *bp)
8682 const int len = sizeof(bp->irq_tbl[0].name);
8683 struct net_device *dev = bp->dev;
8686 tcs = netdev_get_num_tc(dev);
8690 for (i = 0; i < tcs; i++) {
8691 count = bp->tx_nr_rings_per_tc;
8693 netdev_set_tc_queue(dev, i, count, off);
8697 for (i = 0; i < bp->cp_nr_rings; i++) {
8698 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
			 attr, i);
8710 bp->irq_tbl[map_idx].handler = bnxt_msix;
8714 static void bnxt_setup_inta(struct bnxt *bp)
8716 const int len = sizeof(bp->irq_tbl[0].name);
8718 if (netdev_get_num_tc(bp->dev))
8719 netdev_reset_tc(bp->dev);
8721 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8723 bp->irq_tbl[0].handler = bnxt_inta;
8726 static int bnxt_init_int_mode(struct bnxt *bp);
8728 static int bnxt_setup_int_mode(struct bnxt *bp)
8733 rc = bnxt_init_int_mode(bp);
8734 if (rc || !bp->irq_tbl)
8735 return rc ?: -ENODEV;
8738 if (bp->flags & BNXT_FLAG_USING_MSIX)
8739 bnxt_setup_msix(bp);
8741 bnxt_setup_inta(bp);
8743 rc = bnxt_set_real_num_queues(bp);
8747 #ifdef CONFIG_RFS_ACCEL
8748 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8750 return bp->hw_resc.max_rsscos_ctxs;
8753 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8755 return bp->hw_resc.max_vnics;
8759 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8761 return bp->hw_resc.max_stat_ctxs;
8764 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8766 return bp->hw_resc.max_cp_rings;
8769 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8771 unsigned int cp = bp->hw_resc.max_cp_rings;
8773 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8774 cp -= bnxt_get_ulp_msix_num(bp);
8779 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8781 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8783 if (bp->flags & BNXT_FLAG_CHIP_P5)
8784 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8786 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8789 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8791 bp->hw_resc.max_irqs = max_irqs;
8794 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8798 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8799 if (bp->flags & BNXT_FLAG_CHIP_P5)
8800 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8802 return cp - bp->cp_nr_rings;
8805 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8807 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8810 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8812 int max_cp = bnxt_get_max_func_cp_rings(bp);
8813 int max_irq = bnxt_get_max_func_irqs(bp);
8814 int total_req = bp->cp_nr_rings + num;
8815 int max_idx, avail_msix;
8817 max_idx = bp->total_irqs;
8818 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8819 max_idx = min_t(int, bp->total_irqs, max_cp);
8820 avail_msix = max_idx - bp->cp_nr_rings;
8821 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8824 if (max_irq < total_req) {
8825 num = max_irq - bp->cp_nr_rings;
8832 static int bnxt_get_num_msix(struct bnxt *bp)
8834 if (!BNXT_NEW_RM(bp))
8835 return bnxt_get_max_func_irqs(bp);
8837 return bnxt_nq_rings_in_use(bp);
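/* Enable MSI-X: request vectors from the PCI core (reserving any that the
 * RDMA ULP needs), then trim the RX/TX ring counts to fit the vectors
 * actually granted.
 */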
8840 static int bnxt_init_msix(struct bnxt *bp)
8842 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8843 struct msix_entry *msix_ent;
8845 total_vecs = bnxt_get_num_msix(bp);
8846 max = bnxt_get_max_func_irqs(bp);
8847 if (total_vecs > max)
8853 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8857 for (i = 0; i < total_vecs; i++) {
8858 msix_ent[i].entry = i;
8859 msix_ent[i].vector = 0;
8862 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8865 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8866 ulp_msix = bnxt_get_ulp_msix_num(bp);
8867 if (total_vecs < 0 || total_vecs < ulp_msix) {
8869 goto msix_setup_exit;
8872 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8874 for (i = 0; i < total_vecs; i++)
8875 bp->irq_tbl[i].vector = msix_ent[i].vector;
8877 bp->total_irqs = total_vecs;
8878 /* Trim rings based upon num of vectors allocated */
8879 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8880 total_vecs - ulp_msix, min == 1);
8882 goto msix_setup_exit;
8884 bp->cp_nr_rings = (min == 1) ?
8885 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8886 bp->tx_nr_rings + bp->rx_nr_rings;
8890 goto msix_setup_exit;
8892 bp->flags |= BNXT_FLAG_USING_MSIX;
8897 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8900 pci_disable_msix(bp->pdev);
8905 static int bnxt_init_inta(struct bnxt *bp)
8907 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8912 bp->rx_nr_rings = 1;
8913 bp->tx_nr_rings = 1;
8914 bp->cp_nr_rings = 1;
8915 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8916 bp->irq_tbl[0].vector = bp->pdev->irq;
8920 static int bnxt_init_int_mode(struct bnxt *bp)
8924 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8925 rc = bnxt_init_msix(bp);
8927 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8928 /* fallback to INTA */
8929 rc = bnxt_init_inta(bp);
8934 static void bnxt_clear_int_mode(struct bnxt *bp)
8936 if (bp->flags & BNXT_FLAG_USING_MSIX)
8937 pci_disable_msix(bp->pdev);
8941 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8944 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8946 int tcs = netdev_get_num_tc(bp->dev);
8947 bool irq_cleared = false;
8950 if (!bnxt_need_reserve_rings(bp))
8953 if (irq_re_init && BNXT_NEW_RM(bp) &&
8954 bnxt_get_num_msix(bp) != bp->total_irqs) {
8955 bnxt_ulp_irq_stop(bp);
8956 bnxt_clear_int_mode(bp);
8959 rc = __bnxt_reserve_rings(bp);
8962 rc = bnxt_init_int_mode(bp);
8963 bnxt_ulp_irq_restart(bp, rc);
8966 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8969 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8970 netdev_err(bp->dev, "tx ring reservation failure\n");
8971 netdev_reset_tc(bp->dev);
8972 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8978 static void bnxt_free_irq(struct bnxt *bp)
8980 struct bnxt_irq *irq;
8983 #ifdef CONFIG_RFS_ACCEL
8984 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8985 bp->dev->rx_cpu_rmap = NULL;
8987 if (!bp->irq_tbl || !bp->bnapi)
8990 for (i = 0; i < bp->cp_nr_rings; i++) {
8991 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8993 irq = &bp->irq_tbl[map_idx];
8994 if (irq->requested) {
8995 if (irq->have_cpumask) {
8996 irq_set_affinity_hint(irq->vector, NULL);
8997 free_cpumask_var(irq->cpu_mask);
8998 irq->have_cpumask = 0;
9000 free_irq(irq->vector, bp->bnapi[i]);
9007 static int bnxt_request_irq(struct bnxt *bp)
9010 unsigned long flags = 0;
9011 #ifdef CONFIG_RFS_ACCEL
9012 struct cpu_rmap *rmap;
9015 rc = bnxt_setup_int_mode(bp);
9017 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9021 #ifdef CONFIG_RFS_ACCEL
9022 rmap = bp->dev->rx_cpu_rmap;
9024 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9025 flags = IRQF_SHARED;
9027 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9028 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9029 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9031 #ifdef CONFIG_RFS_ACCEL
9032 if (rmap && bp->bnapi[i]->rx_ring) {
9033 rc = irq_cpu_rmap_add(rmap, irq->vector);
9035 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9040 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9047 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9048 int numa_node = dev_to_node(&bp->pdev->dev);
9050 irq->have_cpumask = 1;
9051 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9053 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9055 netdev_warn(bp->dev,
9056 "Set affinity failed, IRQ = %d\n",
9065 static void bnxt_del_napi(struct bnxt *bp)
9072 for (i = 0; i < bp->cp_nr_rings; i++) {
9073 struct bnxt_napi *bnapi = bp->bnapi[i];
9075 __netif_napi_del(&bnapi->napi);
9077 /* We called __netif_napi_del(), we need
9078 * to respect an RCU grace period before freeing napi structures.
9083 static void bnxt_init_napi(struct bnxt *bp)
9086 unsigned int cp_nr_rings = bp->cp_nr_rings;
9087 struct bnxt_napi *bnapi;
9089 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9090 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9092 if (bp->flags & BNXT_FLAG_CHIP_P5)
9093 poll_fn = bnxt_poll_p5;
9094 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9096 for (i = 0; i < cp_nr_rings; i++) {
9097 bnapi = bp->bnapi[i];
9098 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9100 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9101 bnapi = bp->bnapi[cp_nr_rings];
9102 netif_napi_add(bp->dev, &bnapi->napi,
9103 bnxt_poll_nitroa0, 64);
9106 bnapi = bp->bnapi[0];
9107 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9111 static void bnxt_disable_napi(struct bnxt *bp)
9116 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9119 for (i = 0; i < bp->cp_nr_rings; i++) {
9120 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9122 napi_disable(&bp->bnapi[i]->napi);
9123 if (bp->bnapi[i]->rx_ring)
9124 cancel_work_sync(&cpr->dim.work);
9128 static void bnxt_enable_napi(struct bnxt *bp)
9132 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9133 for (i = 0; i < bp->cp_nr_rings; i++) {
9134 struct bnxt_napi *bnapi = bp->bnapi[i];
9135 struct bnxt_cp_ring_info *cpr;
9137 cpr = &bnapi->cp_ring;
9138 if (bnapi->in_reset)
9139 cpr->sw_stats.rx.rx_resets++;
9140 bnapi->in_reset = false;
9142 if (bnapi->rx_ring) {
9143 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9144 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9146 napi_enable(&bnapi->napi);
9150 void bnxt_tx_disable(struct bnxt *bp)
9153 struct bnxt_tx_ring_info *txr;
9156 for (i = 0; i < bp->tx_nr_rings; i++) {
9157 txr = &bp->tx_ring[i];
9158 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9161 /* Make sure napi polls see @dev_state change */
9163 /* Drop carrier first to prevent TX timeout */
9164 netif_carrier_off(bp->dev);
9165 /* Stop all TX queues */
9166 netif_tx_disable(bp->dev);
9169 void bnxt_tx_enable(struct bnxt *bp)
9172 struct bnxt_tx_ring_info *txr;
9174 for (i = 0; i < bp->tx_nr_rings; i++) {
9175 txr = &bp->tx_ring[i];
9176 WRITE_ONCE(txr->dev_state, 0);
9178 /* Make sure napi polls see @dev_state change */
9180 netif_tx_wake_all_queues(bp->dev);
9181 if (bp->link_info.link_up)
9182 netif_carrier_on(bp->dev);
9185 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9187 u8 active_fec = link_info->active_fec_sig_mode &
9188 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9190 switch (active_fec) {
9192 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9194 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9195 return "Clause 74 BaseR";
9196 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9197 return "Clause 91 RS(528,514)";
9198 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9199 return "Clause 91 RS544_1XN";
9200 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9201 return "Clause 91 RS(544,514)";
9202 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9203 return "Clause 91 RS272_1XN";
9204 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9205 return "Clause 91 RS(272,257)";
9209 static void bnxt_report_link(struct bnxt *bp)
9211 if (bp->link_info.link_up) {
9212 const char *signal = "";
9213 const char *flow_ctrl;
9218 netif_carrier_on(bp->dev);
9219 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9220 if (speed == SPEED_UNKNOWN) {
9221 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9224 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9228 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9229 flow_ctrl = "ON - receive & transmit";
9230 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9231 flow_ctrl = "ON - transmit";
9232 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9233 flow_ctrl = "ON - receive";
9236 if (bp->link_info.phy_qcfg_resp.option_flags &
9237 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9238 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9239 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9241 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9244 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9251 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9252 speed, signal, duplex, flow_ctrl);
9253 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9254 netdev_info(bp->dev, "EEE is %s\n",
9255 bp->eee.eee_active ? "active" :
9257 fec = bp->link_info.fec_cfg;
9258 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9259 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9260 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9261 bnxt_report_fec(&bp->link_info));
9263 netif_carrier_off(bp->dev);
9264 netdev_err(bp->dev, "NIC Link is Down\n");
9268 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9270 if (!resp->supported_speeds_auto_mode &&
9271 !resp->supported_speeds_force_mode &&
9272 !resp->supported_pam4_speeds_auto_mode &&
9273 !resp->supported_pam4_speeds_force_mode)
9278 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9280 struct bnxt_link_info *link_info = &bp->link_info;
9281 struct hwrm_port_phy_qcaps_output *resp;
9282 struct hwrm_port_phy_qcaps_input *req;
9285 if (bp->hwrm_spec_code < 0x10201)
9288 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9292 resp = hwrm_req_hold(bp, req);
9293 rc = hwrm_req_send(bp, req);
9295 goto hwrm_phy_qcaps_exit;
9297 bp->phy_flags = resp->flags;
9298 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9299 struct ethtool_eee *eee = &bp->eee;
9300 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9302 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9303 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9304 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9305 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9306 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9309 if (bp->hwrm_spec_code >= 0x10a01) {
9310 if (bnxt_phy_qcaps_no_speed(resp)) {
9311 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9312 netdev_warn(bp->dev, "Ethernet link disabled\n");
9313 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9314 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9315 netdev_info(bp->dev, "Ethernet link enabled\n");
9316 /* Phy re-enabled, reprobe the speeds */
9317 link_info->support_auto_speeds = 0;
9318 link_info->support_pam4_auto_speeds = 0;
9321 if (resp->supported_speeds_auto_mode)
9322 link_info->support_auto_speeds =
9323 le16_to_cpu(resp->supported_speeds_auto_mode);
9324 if (resp->supported_pam4_speeds_auto_mode)
9325 link_info->support_pam4_auto_speeds =
9326 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9328 bp->port_count = resp->port_cnt;
9330 hwrm_phy_qcaps_exit:
9331 hwrm_req_drop(bp, req);
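/* Helper used by bnxt_update_link(): returns true if 'advertising' contains
 * any speed bit that is no longer present in 'supported'.  The XOR/OR form
 * below is equivalent to (advertising & ~supported) != 0; e.g. advertising
 * 0x6 vs supported 0x2 gives diff 0x4, and (0x2 | 0x4) != 0x2, so true.
 */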
9335 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9337 u16 diff = advertising ^ supported;
9339 return ((supported | diff) != supported);
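/* Query the PHY with HWRM_PORT_PHY_QCFG and refresh the cached link state in
 * bp->link_info (speed, duplex, pause, supported/advertised speeds, EEE, FEC).
 * When chng_link_state is set, link_up is updated and any change is reported;
 * if previously advertised speeds were dropped by the firmware, the advertised
 * masks are trimmed and, with autoneg enabled, the link settings are reapplied.
 */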
9342 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9344 struct bnxt_link_info *link_info = &bp->link_info;
9345 struct hwrm_port_phy_qcfg_output *resp;
9346 struct hwrm_port_phy_qcfg_input *req;
9347 u8 link_up = link_info->link_up;
9348 bool support_changed = false;
9351 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9355 resp = hwrm_req_hold(bp, req);
9356 rc = hwrm_req_send(bp, req);
9358 hwrm_req_drop(bp, req);
9362 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9363 link_info->phy_link_status = resp->link;
9364 link_info->duplex = resp->duplex_cfg;
9365 if (bp->hwrm_spec_code >= 0x10800)
9366 link_info->duplex = resp->duplex_state;
9367 link_info->pause = resp->pause;
9368 link_info->auto_mode = resp->auto_mode;
9369 link_info->auto_pause_setting = resp->auto_pause;
9370 link_info->lp_pause = resp->link_partner_adv_pause;
9371 link_info->force_pause_setting = resp->force_pause;
9372 link_info->duplex_setting = resp->duplex_cfg;
9373 if (link_info->phy_link_status == BNXT_LINK_LINK)
9374 link_info->link_speed = le16_to_cpu(resp->link_speed);
9376 link_info->link_speed = 0;
9377 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9378 link_info->force_pam4_link_speed =
9379 le16_to_cpu(resp->force_pam4_link_speed);
9380 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9381 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9382 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9383 link_info->auto_pam4_link_speeds =
9384 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9385 link_info->lp_auto_link_speeds =
9386 le16_to_cpu(resp->link_partner_adv_speeds);
9387 link_info->lp_auto_pam4_link_speeds =
9388 resp->link_partner_pam4_adv_speeds;
9389 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9390 link_info->phy_ver[0] = resp->phy_maj;
9391 link_info->phy_ver[1] = resp->phy_min;
9392 link_info->phy_ver[2] = resp->phy_bld;
9393 link_info->media_type = resp->media_type;
9394 link_info->phy_type = resp->phy_type;
9395 link_info->transceiver = resp->xcvr_pkg_type;
9396 link_info->phy_addr = resp->eee_config_phy_addr &
9397 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9398 link_info->module_status = resp->module_status;
9400 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9401 struct ethtool_eee *eee = &bp->eee;
9404 eee->eee_active = 0;
9405 if (resp->eee_config_phy_addr &
9406 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9407 eee->eee_active = 1;
9408 fw_speeds = le16_to_cpu(
9409 resp->link_partner_adv_eee_link_speed_mask);
9410 eee->lp_advertised =
9411 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9414 /* Pull initial EEE config */
9415 if (!chng_link_state) {
9416 if (resp->eee_config_phy_addr &
9417 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9418 eee->eee_enabled = 1;
9420 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9422 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9424 if (resp->eee_config_phy_addr &
9425 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9428 eee->tx_lpi_enabled = 1;
9429 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9430 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9431 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9436 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9437 if (bp->hwrm_spec_code >= 0x10504) {
9438 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9439 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9441 /* TODO: need to add more logic to report VF link */
9442 if (chng_link_state) {
9443 if (link_info->phy_link_status == BNXT_LINK_LINK)
9444 link_info->link_up = 1;
9446 link_info->link_up = 0;
9447 if (link_up != link_info->link_up)
9448 bnxt_report_link(bp);
9450 /* always link down if not required to update link state */
9451 link_info->link_up = 0;
9453 hwrm_req_drop(bp, req);
9455 if (!BNXT_PHY_CFG_ABLE(bp))
9458 /* Check if any advertised speeds are no longer supported. The caller
9459 * holds the link_lock mutex, so we can modify link_info settings.
9461 if (bnxt_support_dropped(link_info->advertising,
9462 link_info->support_auto_speeds)) {
9463 link_info->advertising = link_info->support_auto_speeds;
9464 support_changed = true;
9466 if (bnxt_support_dropped(link_info->advertising_pam4,
9467 link_info->support_pam4_auto_speeds)) {
9468 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9469 support_changed = true;
9471 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9472 bnxt_hwrm_set_link_setting(bp, true, false);
9476 static void bnxt_get_port_module_status(struct bnxt *bp)
9478 struct bnxt_link_info *link_info = &bp->link_info;
9479 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9482 if (bnxt_update_link(bp, true))
9485 module_status = link_info->module_status;
9486 switch (module_status) {
9487 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9488 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9489 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9490 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9492 if (bp->hwrm_spec_code >= 0x10201) {
9493 netdev_warn(bp->dev, "Module part number %s\n",
9494 resp->phy_vendor_partnumber);
9496 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9497 netdev_warn(bp->dev, "TX is disabled\n");
9498 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9499 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
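/* Fill the pause fields of a PORT_PHY_CFG request from the requested flow
 * control: with autoneg flow control the auto_pause bits are used (plus
 * AUTONEG_PAUSE on HWRM spec 0x10201+), otherwise force_pause is programmed
 * and, on spec 0x10201+, mirrored into auto_pause as well.
 */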
9504 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9506 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9507 if (bp->hwrm_spec_code >= 0x10201)
9509 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9510 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9511 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9512 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9513 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9515 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9517 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9518 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9519 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9520 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9522 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9523 if (bp->hwrm_spec_code >= 0x10201) {
9524 req->auto_pause = req->force_pause;
9525 req->enables |= cpu_to_le32(
9526 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9531 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9533 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9534 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9535 if (bp->link_info.advertising) {
9536 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9537 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9539 if (bp->link_info.advertising_pam4) {
9541 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9542 req->auto_link_pam4_speed_mask =
9543 cpu_to_le16(bp->link_info.advertising_pam4);
9545 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9546 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9548 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9549 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9550 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9551 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9553 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9557 /* tell chimp that the setting takes effect immediately */
9558 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9561 int bnxt_hwrm_set_pause(struct bnxt *bp)
9563 struct hwrm_port_phy_cfg_input *req;
9566 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9570 bnxt_hwrm_set_pause_common(bp, req);
9572 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9573 bp->link_info.force_link_chng)
9574 bnxt_hwrm_set_link_common(bp, req);
9576 rc = hwrm_req_send(bp, req);
9577 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9578 /* since changing the pause setting doesn't trigger any link
9579 * change event, the driver needs to update the current pause
9580 * result upon successful return of the phy_cfg command
9582 bp->link_info.pause =
9583 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9584 bp->link_info.auto_pause_setting = 0;
9585 if (!bp->link_info.force_link_chng)
9586 bnxt_report_link(bp);
9588 bp->link_info.force_link_chng = false;
9592 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9593 struct hwrm_port_phy_cfg_input *req)
9595 struct ethtool_eee *eee = &bp->eee;
9597 if (eee->eee_enabled) {
9599 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9601 if (eee->tx_lpi_enabled)
9602 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9604 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9606 req->flags |= cpu_to_le32(flags);
9607 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9608 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9609 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9611 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9615 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9617 struct hwrm_port_phy_cfg_input *req;
9620 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9625 bnxt_hwrm_set_pause_common(bp, req);
9627 bnxt_hwrm_set_link_common(bp, req);
9630 bnxt_hwrm_set_eee(bp, req);
9631 return hwrm_req_send(bp, req);
9634 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9636 struct hwrm_port_phy_cfg_input *req;
9639 if (!BNXT_SINGLE_PF(bp))
9642 if (pci_num_vf(bp->pdev) &&
9643 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9646 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9650 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9651 return hwrm_req_send(bp, req);
9654 static int bnxt_fw_init_one(struct bnxt *bp);
9656 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9658 #ifdef CONFIG_TEE_BNXT_FW
9659 int rc = tee_bnxt_fw_load();
9662 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9666 netdev_err(bp->dev, "OP-TEE not supported\n");
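/* Best-effort firmware recovery: if the health status register is readable,
 * poll until the firmware leaves the BOOTING/RECOVERING states or the retry
 * budget runs out.  If the status indicates a crash with no master function
 * present, fall back to reloading the firmware via OP-TEE.
 */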
9671 static int bnxt_try_recover_fw(struct bnxt *bp)
9673 if (bp->fw_health && bp->fw_health->status_reliable) {
9678 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9679 rc = bnxt_hwrm_poll(bp);
9680 if (!BNXT_FW_IS_BOOTING(sts) &&
9681 !BNXT_FW_IS_RECOVERING(sts))
9684 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9686 if (!BNXT_FW_IS_HEALTHY(sts)) {
9688 "Firmware not responding, status: 0x%x\n",
9692 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9693 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9694 return bnxt_fw_reset_via_optee(bp);
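/* Tell the firmware that the interface is going up or down via
 * FUNC_DRV_IF_CHANGE.  The response flags indicate whether resources changed
 * or a hot firmware reset completed while the driver was down; in that case
 * context memory, the interrupt mode and (with the new resource manager) the
 * ring/VNIC reservations are reinitialized before the open can proceed.
 */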
9702 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9704 struct hwrm_func_drv_if_change_output *resp;
9705 struct hwrm_func_drv_if_change_input *req;
9706 bool fw_reset = !bp->irq_tbl;
9707 bool resc_reinit = false;
9711 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9714 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9719 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9720 resp = hwrm_req_hold(bp, req);
9722 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9723 while (retry < BNXT_FW_IF_RETRY) {
9724 rc = hwrm_req_send(bp, req);
9732 if (rc == -EAGAIN) {
9733 hwrm_req_drop(bp, req);
9736 flags = le32_to_cpu(resp->flags);
9738 rc = bnxt_try_recover_fw(bp);
9741 hwrm_req_drop(bp, req);
9746 bnxt_inv_fw_health_reg(bp);
9750 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9752 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9754 else if (bp->fw_health && !bp->fw_health->status_reliable)
9755 bnxt_try_map_fw_health_reg(bp);
9757 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9758 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9759 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9762 if (resc_reinit || fw_reset) {
9764 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9765 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9767 bnxt_free_ctx_mem(bp);
9771 rc = bnxt_fw_init_one(bp);
9773 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9774 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9777 bnxt_clear_int_mode(bp);
9778 rc = bnxt_init_int_mode(bp);
9780 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9781 netdev_err(bp->dev, "init int mode failed\n");
9785 if (BNXT_NEW_RM(bp)) {
9786 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9788 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9790 netdev_err(bp->dev, "resc_qcaps failed\n");
9792 hw_resc->resv_cp_rings = 0;
9793 hw_resc->resv_stat_ctxs = 0;
9794 hw_resc->resv_irqs = 0;
9795 hw_resc->resv_tx_rings = 0;
9796 hw_resc->resv_rx_rings = 0;
9797 hw_resc->resv_hw_ring_grps = 0;
9798 hw_resc->resv_vnics = 0;
9800 bp->tx_nr_rings = 0;
9801 bp->rx_nr_rings = 0;
9808 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9810 struct hwrm_port_led_qcaps_output *resp;
9811 struct hwrm_port_led_qcaps_input *req;
9812 struct bnxt_pf_info *pf = &bp->pf;
9816 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9819 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9823 req->port_id = cpu_to_le16(pf->port_id);
9824 resp = hwrm_req_hold(bp, req);
9825 rc = hwrm_req_send(bp, req);
9827 hwrm_req_drop(bp, req);
9830 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9833 bp->num_leds = resp->num_leds;
9834 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9836 for (i = 0; i < bp->num_leds; i++) {
9837 struct bnxt_led_info *led = &bp->leds[i];
9838 __le16 caps = led->led_state_caps;
9840 if (!led->led_group_id ||
9841 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9847 hwrm_req_drop(bp, req);
9851 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9853 struct hwrm_wol_filter_alloc_output *resp;
9854 struct hwrm_wol_filter_alloc_input *req;
9857 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9861 req->port_id = cpu_to_le16(bp->pf.port_id);
9862 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9863 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9864 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9866 resp = hwrm_req_hold(bp, req);
9867 rc = hwrm_req_send(bp, req);
9869 bp->wol_filter_id = resp->wol_filter_id;
9870 hwrm_req_drop(bp, req);
9874 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9876 struct hwrm_wol_filter_free_input *req;
9879 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9883 req->port_id = cpu_to_le16(bp->pf.port_id);
9884 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9885 req->wol_filter_id = bp->wol_filter_id;
9887 return hwrm_req_send(bp, req);
9890 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9892 struct hwrm_wol_filter_qcfg_output *resp;
9893 struct hwrm_wol_filter_qcfg_input *req;
9894 u16 next_handle = 0;
9897 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9901 req->port_id = cpu_to_le16(bp->pf.port_id);
9902 req->handle = cpu_to_le16(handle);
9903 resp = hwrm_req_hold(bp, req);
9904 rc = hwrm_req_send(bp, req);
9906 next_handle = le16_to_cpu(resp->next_handle);
9907 if (next_handle != 0) {
9908 if (resp->wol_type ==
9909 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9911 bp->wol_filter_id = resp->wol_filter_id;
9915 hwrm_req_drop(bp, req);
9919 static void bnxt_get_wol_settings(struct bnxt *bp)
9924 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9928 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9929 } while (handle && handle != 0xffff);
9932 #ifdef CONFIG_BNXT_HWMON
9933 static ssize_t bnxt_show_temp(struct device *dev,
9934 struct device_attribute *devattr, char *buf)
9936 struct hwrm_temp_monitor_query_output *resp;
9937 struct hwrm_temp_monitor_query_input *req;
9938 struct bnxt *bp = dev_get_drvdata(dev);
9942 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9945 resp = hwrm_req_hold(bp, req);
9946 rc = hwrm_req_send(bp, req);
9948 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9949 hwrm_req_drop(bp, req);
9954 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9956 static struct attribute *bnxt_attrs[] = {
9957 &sensor_dev_attr_temp1_input.dev_attr.attr,
9960 ATTRIBUTE_GROUPS(bnxt);
9962 static void bnxt_hwmon_close(struct bnxt *bp)
9964 if (bp->hwmon_dev) {
9965 hwmon_device_unregister(bp->hwmon_dev);
9966 bp->hwmon_dev = NULL;
9970 static void bnxt_hwmon_open(struct bnxt *bp)
9972 struct hwrm_temp_monitor_query_input *req;
9973 struct pci_dev *pdev = bp->pdev;
9976 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9978 rc = hwrm_req_send_silent(bp, req);
9979 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9980 bnxt_hwmon_close(bp);
9987 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9988 DRV_MODULE_NAME, bp,
9990 if (IS_ERR(bp->hwmon_dev)) {
9991 bp->hwmon_dev = NULL;
9992 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9996 static void bnxt_hwmon_close(struct bnxt *bp)
10000 static void bnxt_hwmon_open(struct bnxt *bp)
10005 static bool bnxt_eee_config_ok(struct bnxt *bp)
10007 struct ethtool_eee *eee = &bp->eee;
10008 struct bnxt_link_info *link_info = &bp->link_info;
10010 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10013 if (eee->eee_enabled) {
10015 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10017 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10018 eee->eee_enabled = 0;
10021 if (eee->advertised & ~advertising) {
10022 eee->advertised = advertising & eee->supported;
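/* Re-read the link state and compare it against the requested settings.
 * Pause, forced speed/duplex, advertised autoneg masks and EEE are each
 * checked; PORT_PHY_CFG is only issued when something actually needs to
 * change (or when the last close left the link shut down).
 */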
10029 static int bnxt_update_phy_setting(struct bnxt *bp)
10032 bool update_link = false;
10033 bool update_pause = false;
10034 bool update_eee = false;
10035 struct bnxt_link_info *link_info = &bp->link_info;
10037 rc = bnxt_update_link(bp, true);
10039 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10043 if (!BNXT_SINGLE_PF(bp))
10046 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10047 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10048 link_info->req_flow_ctrl)
10049 update_pause = true;
10050 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10051 link_info->force_pause_setting != link_info->req_flow_ctrl)
10052 update_pause = true;
10053 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10054 if (BNXT_AUTO_MODE(link_info->auto_mode))
10055 update_link = true;
10056 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10057 link_info->req_link_speed != link_info->force_link_speed)
10058 update_link = true;
10059 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10060 link_info->req_link_speed != link_info->force_pam4_link_speed)
10061 update_link = true;
10062 if (link_info->req_duplex != link_info->duplex_setting)
10063 update_link = true;
10065 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10066 update_link = true;
10067 if (link_info->advertising != link_info->auto_link_speeds ||
10068 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10069 update_link = true;
10072 /* The last close may have shut down the link, so we need to call
10073 * PHY_CFG to bring it back up.
10075 if (!bp->link_info.link_up)
10076 update_link = true;
10078 if (!bnxt_eee_config_ok(bp))
10082 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10083 else if (update_pause)
10084 rc = bnxt_hwrm_set_pause(bp);
10086 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10094 /* Common routine to pre-map certain register block to different GRC window.
10095 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10096 * in the PF and 3 windows in the VF can be customized to map different register blocks.
10099 static void bnxt_preset_reg_win(struct bnxt *bp)
10102 /* CAG registers map to GRC window #4 */
10103 writel(BNXT_CAG_REG_BASE,
10104 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10108 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10110 static int bnxt_reinit_after_abort(struct bnxt *bp)
10114 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10117 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10120 rc = bnxt_fw_init_one(bp);
10122 bnxt_clear_int_mode(bp);
10123 rc = bnxt_init_int_mode(bp);
10125 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10126 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
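/* Core open path.  Roughly: reserve rings (if none were reserved at probe),
 * allocate memory, set up NAPI and IRQs, initialize the NIC, enable
 * NAPI/interrupts, bring the PHY up if link_re_init is set, enable the TX
 * queues, arm the periodic timer and finally check the SFP+ module status.
 * Errors unwind through the open_err* labels, freeing SKBs and memory.
 */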
10132 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10136 bnxt_preset_reg_win(bp);
10137 netif_carrier_off(bp->dev);
10139 /* Reserve rings now if none were reserved at driver probe. */
10140 rc = bnxt_init_dflt_ring_mode(bp);
10142 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10146 rc = bnxt_reserve_rings(bp, irq_re_init);
10149 if ((bp->flags & BNXT_FLAG_RFS) &&
10150 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10151 /* disable RFS if falling back to INTA */
10152 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10153 bp->flags &= ~BNXT_FLAG_RFS;
10156 rc = bnxt_alloc_mem(bp, irq_re_init);
10158 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10159 goto open_err_free_mem;
10163 bnxt_init_napi(bp);
10164 rc = bnxt_request_irq(bp);
10166 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10171 rc = bnxt_init_nic(bp, irq_re_init);
10173 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10177 bnxt_enable_napi(bp);
10178 bnxt_debug_dev_init(bp);
10180 if (link_re_init) {
10181 mutex_lock(&bp->link_lock);
10182 rc = bnxt_update_phy_setting(bp);
10183 mutex_unlock(&bp->link_lock);
10185 netdev_warn(bp->dev, "failed to update phy settings\n");
10186 if (BNXT_SINGLE_PF(bp)) {
10187 bp->link_info.phy_retry = true;
10188 bp->link_info.phy_retry_expires =
10195 udp_tunnel_nic_reset_ntf(bp->dev);
10197 set_bit(BNXT_STATE_OPEN, &bp->state);
10198 bnxt_enable_int(bp);
10199 /* Enable TX queues */
10200 bnxt_tx_enable(bp);
10201 mod_timer(&bp->timer, jiffies + bp->current_interval);
10202 /* Poll link status and check for SFP+ module status */
10203 mutex_lock(&bp->link_lock);
10204 bnxt_get_port_module_status(bp);
10205 mutex_unlock(&bp->link_lock);
10207 /* VF-reps may need to be re-opened after the PF is re-opened */
10209 bnxt_vf_reps_open(bp);
10216 bnxt_free_skbs(bp);
10218 bnxt_free_mem(bp, true);
10222 /* rtnl_lock held */
10223 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10227 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10230 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10232 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10233 dev_close(bp->dev);
10238 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10239 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline self tests.
10242 int bnxt_half_open_nic(struct bnxt *bp)
10246 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10247 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10249 goto half_open_err;
10252 rc = bnxt_alloc_mem(bp, false);
10254 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10255 goto half_open_err;
10257 rc = bnxt_init_nic(bp, false);
10259 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10260 goto half_open_err;
10265 bnxt_free_skbs(bp);
10266 bnxt_free_mem(bp, false);
10267 dev_close(bp->dev);
10271 /* rtnl_lock held, this call can only be made after a previous successful
10272 * call to bnxt_half_open_nic().
10274 void bnxt_half_close_nic(struct bnxt *bp)
10276 bnxt_hwrm_resource_free(bp, false, false);
10277 bnxt_free_skbs(bp);
10278 bnxt_free_mem(bp, false);
10281 static void bnxt_reenable_sriov(struct bnxt *bp)
10284 struct bnxt_pf_info *pf = &bp->pf;
10285 int n = pf->active_vfs;
10288 bnxt_cfg_hw_sriov(bp, &n, true);
10292 static int bnxt_open(struct net_device *dev)
10294 struct bnxt *bp = netdev_priv(dev);
10297 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10298 rc = bnxt_reinit_after_abort(bp);
10301 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10303 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10308 rc = bnxt_hwrm_if_change(bp, true);
10312 rc = __bnxt_open_nic(bp, true, true);
10314 bnxt_hwrm_if_change(bp, false);
10316 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10317 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10318 bnxt_ulp_start(bp, 0);
10319 bnxt_reenable_sriov(bp);
10322 bnxt_hwmon_open(bp);
10328 static bool bnxt_drv_busy(struct bnxt *bp)
10330 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10331 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10334 static void bnxt_get_ring_stats(struct bnxt *bp,
10335 struct rtnl_link_stats64 *stats);
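/* Core close path, mirroring __bnxt_open_nic(): stop TX and clear
 * BNXT_STATE_OPEN first, wait for any in-flight stats readers / sp tasks,
 * then shut down the rings, disable NAPI, delete the timer and free SKBs.
 * Ring statistics are saved to net_stats_prev so they survive a reopen.
 */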
10337 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10340 /* Close the VF-reps before closing PF */
10342 bnxt_vf_reps_close(bp);
10344 /* Change device state to avoid TX queue wake-ups */
10345 bnxt_tx_disable(bp);
10347 clear_bit(BNXT_STATE_OPEN, &bp->state);
10348 smp_mb__after_atomic();
10349 while (bnxt_drv_busy(bp))
10352 /* Flush rings and disable interrupts */
10353 bnxt_shutdown_nic(bp, irq_re_init);
10355 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10357 bnxt_debug_dev_exit(bp);
10358 bnxt_disable_napi(bp);
10359 del_timer_sync(&bp->timer);
10360 bnxt_free_skbs(bp);
10362 /* Save ring stats before shutdown */
10363 if (bp->bnapi && irq_re_init)
10364 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10369 bnxt_free_mem(bp, irq_re_init);
10372 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10376 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10377 /* If we get here, it means firmware reset is in progress
10378 * while we are trying to close. We can safely proceed with
10379 * the close because we are holding rtnl_lock(). Some firmware
10380 * messages may fail as we proceed to close. We set the
10381 * ABORT_ERR flag here so that the FW reset thread will later
10382 * abort when it gets the rtnl_lock() and sees the flag.
10384 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10385 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10388 #ifdef CONFIG_BNXT_SRIOV
10389 if (bp->sriov_cfg) {
10390 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10392 BNXT_SRIOV_CFG_WAIT_TMO);
10394 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10397 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10401 static int bnxt_close(struct net_device *dev)
10403 struct bnxt *bp = netdev_priv(dev);
10405 bnxt_hwmon_close(bp);
10406 bnxt_close_nic(bp, true, true);
10407 bnxt_hwrm_shutdown_link(bp);
10408 bnxt_hwrm_if_change(bp, false);
10412 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10415 struct hwrm_port_phy_mdio_read_output *resp;
10416 struct hwrm_port_phy_mdio_read_input *req;
10419 if (bp->hwrm_spec_code < 0x10a00)
10420 return -EOPNOTSUPP;
10422 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10426 req->port_id = cpu_to_le16(bp->pf.port_id);
10427 req->phy_addr = phy_addr;
10428 req->reg_addr = cpu_to_le16(reg & 0x1f);
10429 if (mdio_phy_id_is_c45(phy_addr)) {
10430 req->cl45_mdio = 1;
10431 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10432 req->dev_addr = mdio_phy_id_devad(phy_addr);
10433 req->reg_addr = cpu_to_le16(reg);
10436 resp = hwrm_req_hold(bp, req);
10437 rc = hwrm_req_send(bp, req);
10439 *val = le16_to_cpu(resp->reg_data);
10440 hwrm_req_drop(bp, req);
10444 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10447 struct hwrm_port_phy_mdio_write_input *req;
10450 if (bp->hwrm_spec_code < 0x10a00)
10451 return -EOPNOTSUPP;
10453 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10457 req->port_id = cpu_to_le16(bp->pf.port_id);
10458 req->phy_addr = phy_addr;
10459 req->reg_addr = cpu_to_le16(reg & 0x1f);
10460 if (mdio_phy_id_is_c45(phy_addr)) {
10461 req->cl45_mdio = 1;
10462 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10463 req->dev_addr = mdio_phy_id_devad(phy_addr);
10464 req->reg_addr = cpu_to_le16(reg);
10466 req->reg_data = cpu_to_le16(val);
10468 return hwrm_req_send(bp, req);
10471 /* rtnl_lock held */
10472 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10474 struct mii_ioctl_data *mdio = if_mii(ifr);
10475 struct bnxt *bp = netdev_priv(dev);
10480 mdio->phy_id = bp->link_info.phy_addr;
10483 case SIOCGMIIREG: {
10484 u16 mii_regval = 0;
10486 if (!netif_running(dev))
10489 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10491 mdio->val_out = mii_regval;
10496 if (!netif_running(dev))
10499 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10502 case SIOCSHWTSTAMP:
10503 return bnxt_hwtstamp_set(dev, ifr);
10505 case SIOCGHWTSTAMP:
10506 return bnxt_hwtstamp_get(dev, ifr);
10512 return -EOPNOTSUPP;
10515 static void bnxt_get_ring_stats(struct bnxt *bp,
10516 struct rtnl_link_stats64 *stats)
10520 for (i = 0; i < bp->cp_nr_rings; i++) {
10521 struct bnxt_napi *bnapi = bp->bnapi[i];
10522 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10523 u64 *sw = cpr->stats.sw_stats;
10525 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10526 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10527 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10529 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10530 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10531 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10533 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10534 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10535 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10537 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10538 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10539 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10541 stats->rx_missed_errors +=
10542 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10544 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10546 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10548 stats->rx_dropped +=
10549 cpr->sw_stats.rx.rx_netpoll_discards +
10550 cpr->sw_stats.rx.rx_oom_discards;
10554 static void bnxt_add_prev_stats(struct bnxt *bp,
10555 struct rtnl_link_stats64 *stats)
10557 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10559 stats->rx_packets += prev_stats->rx_packets;
10560 stats->tx_packets += prev_stats->tx_packets;
10561 stats->rx_bytes += prev_stats->rx_bytes;
10562 stats->tx_bytes += prev_stats->tx_bytes;
10563 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10564 stats->multicast += prev_stats->multicast;
10565 stats->rx_dropped += prev_stats->rx_dropped;
10566 stats->tx_dropped += prev_stats->tx_dropped;
10570 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10572 struct bnxt *bp = netdev_priv(dev);
10574 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10575 /* Make sure bnxt_close_nic() sees that we are reading stats before
10576 * we check the BNXT_STATE_OPEN flag.
10578 smp_mb__after_atomic();
10579 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10580 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10581 *stats = bp->net_stats_prev;
10585 bnxt_get_ring_stats(bp, stats);
10586 bnxt_add_prev_stats(bp, stats);
10588 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10589 u64 *rx = bp->port_stats.sw_stats;
10590 u64 *tx = bp->port_stats.sw_stats +
10591 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10593 stats->rx_crc_errors =
10594 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10595 stats->rx_frame_errors =
10596 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10597 stats->rx_length_errors =
10598 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10599 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10600 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10602 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10603 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10604 stats->collisions =
10605 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10606 stats->tx_fifo_errors =
10607 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10608 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10610 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
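/* Rebuild the default VNIC's multicast list from the netdev and report
 * whether the hardware filter needs to be reprogrammed.  If the list exceeds
 * BNXT_MAX_MC_ADDRS, the ALL_MCAST mask is requested instead and the list is
 * cleared.
 */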
10613 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10615 struct net_device *dev = bp->dev;
10616 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10617 struct netdev_hw_addr *ha;
10620 bool update = false;
10623 netdev_for_each_mc_addr(ha, dev) {
10624 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10625 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10626 vnic->mc_list_count = 0;
10630 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10631 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10638 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10640 if (mc_count != vnic->mc_list_count) {
10641 vnic->mc_list_count = mc_count;
10647 static bool bnxt_uc_list_updated(struct bnxt *bp)
10649 struct net_device *dev = bp->dev;
10650 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10651 struct netdev_hw_addr *ha;
10654 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10657 netdev_for_each_uc_addr(ha, dev) {
10658 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10666 static void bnxt_set_rx_mode(struct net_device *dev)
10668 struct bnxt *bp = netdev_priv(dev);
10669 struct bnxt_vnic_info *vnic;
10670 bool mc_update = false;
10674 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10677 vnic = &bp->vnic_info[0];
10678 mask = vnic->rx_mask;
10679 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10680 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10681 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10682 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10684 if (dev->flags & IFF_PROMISC)
10685 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10687 uc_update = bnxt_uc_list_updated(bp);
10689 if (dev->flags & IFF_BROADCAST)
10690 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10691 if (dev->flags & IFF_ALLMULTI) {
10692 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10693 vnic->mc_list_count = 0;
10695 mc_update = bnxt_mc_list_updated(bp, &mask);
10698 if (mask != vnic->rx_mask || uc_update || mc_update) {
10699 vnic->rx_mask = mask;
10701 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10702 bnxt_queue_sp_work(bp);
10706 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10708 struct net_device *dev = bp->dev;
10709 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10710 struct hwrm_cfa_l2_filter_free_input *req;
10711 struct netdev_hw_addr *ha;
10712 int i, off = 0, rc;
10715 netif_addr_lock_bh(dev);
10716 uc_update = bnxt_uc_list_updated(bp);
10717 netif_addr_unlock_bh(dev);
10722 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10725 hwrm_req_hold(bp, req);
10726 for (i = 1; i < vnic->uc_filter_count; i++) {
10727 req->l2_filter_id = vnic->fw_l2_filter_id[i];
10729 rc = hwrm_req_send(bp, req);
10731 hwrm_req_drop(bp, req);
10733 vnic->uc_filter_count = 1;
10735 netif_addr_lock_bh(dev);
10736 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10737 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10739 netdev_for_each_uc_addr(ha, dev) {
10740 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10742 vnic->uc_filter_count++;
10745 netif_addr_unlock_bh(dev);
10747 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10748 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10750 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10752 vnic->uc_filter_count = i;
10758 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10759 !bnxt_promisc_ok(bp))
10760 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10761 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10762 if (rc && vnic->mc_list_count) {
10763 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10765 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10766 vnic->mc_list_count = 0;
10767 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10770 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10776 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10778 #ifdef CONFIG_BNXT_SRIOV
10779 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10780 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10782 /* No minimum rings were provisioned by the PF. Don't
10783 * reserve rings by default when device is down.
10785 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10788 if (!netif_running(bp->dev))
10795 /* If the chip and firmware support RFS */
10796 static bool bnxt_rfs_supported(struct bnxt *bp)
10798 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10799 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10803 /* 212 firmware is broken for aRFS */
10804 if (BNXT_FW_MAJ(bp) == 212)
10806 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10808 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10813 /* If runtime conditions support RFS */
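/* aRFS needs one VNIC (and normally one RSS context) per RX ring in addition
 * to the default VNIC, so this checks 1 + rx_nr_rings against the function's
 * limits and, with the new resource manager, tries to actually reserve that
 * many VNICs before advertising NTUPLE support.
 */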
10814 static bool bnxt_rfs_capable(struct bnxt *bp)
10816 #ifdef CONFIG_RFS_ACCEL
10817 int vnics, max_vnics, max_rss_ctxs;
10819 if (bp->flags & BNXT_FLAG_CHIP_P5)
10820 return bnxt_rfs_supported(bp);
10821 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10824 vnics = 1 + bp->rx_nr_rings;
10825 max_vnics = bnxt_get_max_func_vnics(bp);
10826 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10828 /* RSS contexts not a limiting factor */
10829 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10830 max_rss_ctxs = max_vnics;
10831 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10832 if (bp->rx_nr_rings > 1)
10833 netdev_warn(bp->dev,
10834 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10835 min(max_rss_ctxs - 1, max_vnics - 1));
10839 if (!BNXT_NEW_RM(bp))
10842 if (vnics == bp->hw_resc.resv_vnics)
10845 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10846 if (vnics <= bp->hw_resc.resv_vnics)
10849 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10850 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10857 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10858 netdev_features_t features)
10860 struct bnxt *bp = netdev_priv(dev);
10861 netdev_features_t vlan_features;
10863 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10864 features &= ~NETIF_F_NTUPLE;
10866 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10867 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10869 if (!(features & NETIF_F_GRO))
10870 features &= ~NETIF_F_GRO_HW;
10872 if (features & NETIF_F_GRO_HW)
10873 features &= ~NETIF_F_LRO;
10875 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10876 * turned on or off together.
10878 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10879 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10880 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10881 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10882 else if (vlan_features)
10883 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10885 #ifdef CONFIG_BNXT_SRIOV
10886 if (BNXT_VF(bp) && bp->vf.vlan)
10887 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10892 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10894 struct bnxt *bp = netdev_priv(dev);
10895 u32 flags = bp->flags;
10898 bool re_init = false;
10899 bool update_tpa = false;
10901 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10902 if (features & NETIF_F_GRO_HW)
10903 flags |= BNXT_FLAG_GRO;
10904 else if (features & NETIF_F_LRO)
10905 flags |= BNXT_FLAG_LRO;
10907 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10908 flags &= ~BNXT_FLAG_TPA;
10910 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10911 flags |= BNXT_FLAG_STRIP_VLAN;
10913 if (features & NETIF_F_NTUPLE)
10914 flags |= BNXT_FLAG_RFS;
10916 changes = flags ^ bp->flags;
10917 if (changes & BNXT_FLAG_TPA) {
10919 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10920 (flags & BNXT_FLAG_TPA) == 0 ||
10921 (bp->flags & BNXT_FLAG_CHIP_P5))
10925 if (changes & ~BNXT_FLAG_TPA)
10928 if (flags != bp->flags) {
10929 u32 old_flags = bp->flags;
10931 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10934 bnxt_set_ring_params(bp);
10939 bnxt_close_nic(bp, false, false);
10942 bnxt_set_ring_params(bp);
10944 return bnxt_open_nic(bp, false, false);
10948 rc = bnxt_set_tpa(bp,
10949 (flags & BNXT_FLAG_TPA) ?
10952 bp->flags = old_flags;
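/* TX offload sanity checks used by the features_check path.  The hardware
 * can only parse a limited set of IPv6 extension headers and tunnel types,
 * so packets that fall outside of that (too many or fragment extension
 * headers, unknown UDP tunnel ports, unsupported GRE payloads) have their
 * checksum and GSO offloads stripped and are handled by the stack instead.
 */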
10958 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10961 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10966 /* Check that there are at most 2 IPv6 extension headers, no
10967 * fragment header, and each is <= 64 bytes.
10969 start = nw_off + sizeof(*ip6h);
10970 nexthdr = &ip6h->nexthdr;
10971 while (ipv6_ext_hdr(*nexthdr)) {
10972 struct ipv6_opt_hdr *hp;
10975 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10976 *nexthdr == NEXTHDR_FRAGMENT)
10978 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10979 skb_headlen(skb), NULL);
10982 if (*nexthdr == NEXTHDR_AUTH)
10983 hdrlen = ipv6_authlen(hp);
10985 hdrlen = ipv6_optlen(hp);
10989 nexthdr = &hp->nexthdr;
10994 /* Caller will check inner protocol */
10995 if (skb->encapsulation) {
11001 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11002 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11005 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11006 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11008 struct udphdr *uh = udp_hdr(skb);
11009 __be16 udp_port = uh->dest;
11011 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11013 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11014 struct ethhdr *eh = inner_eth_hdr(skb);
11016 switch (eh->h_proto) {
11017 case htons(ETH_P_IP):
11019 case htons(ETH_P_IPV6):
11020 return bnxt_exthdr_check(bp, skb,
11021 skb_inner_network_offset(skb),
11028 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11030 switch (l4_proto) {
11032 return bnxt_udp_tunl_check(bp, skb);
11035 case IPPROTO_GRE: {
11036 switch (skb->inner_protocol) {
11039 case htons(ETH_P_IP):
11041 case htons(ETH_P_IPV6):
11046 /* Check ext headers of inner ipv6 */
11047 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11053 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11054 struct net_device *dev,
11055 netdev_features_t features)
11057 struct bnxt *bp = netdev_priv(dev);
11060 features = vlan_features_check(skb, features);
11061 switch (vlan_get_protocol(skb)) {
11062 case htons(ETH_P_IP):
11063 if (!skb->encapsulation)
11065 l4_proto = &ip_hdr(skb)->protocol;
11066 if (bnxt_tunl_check(bp, skb, *l4_proto))
11069 case htons(ETH_P_IPV6):
11070 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11073 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11077 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11080 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11083 struct hwrm_dbg_read_direct_output *resp;
11084 struct hwrm_dbg_read_direct_input *req;
11085 __le32 *dbg_reg_buf;
11086 dma_addr_t mapping;
11089 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11093 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11095 if (!dbg_reg_buf) {
11097 goto dbg_rd_reg_exit;
11100 req->host_dest_addr = cpu_to_le64(mapping);
11102 resp = hwrm_req_hold(bp, req);
11103 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11104 req->read_len32 = cpu_to_le32(num_words);
11106 rc = hwrm_req_send(bp, req);
11107 if (rc || resp->error_code) {
11109 goto dbg_rd_reg_exit;
11111 for (i = 0; i < num_words; i++)
11112 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11115 hwrm_req_drop(bp, req);
11119 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11120 u32 ring_id, u32 *prod, u32 *cons)
11122 struct hwrm_dbg_ring_info_get_output *resp;
11123 struct hwrm_dbg_ring_info_get_input *req;
11126 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11130 req->ring_type = ring_type;
11131 req->fw_ring_id = cpu_to_le32(ring_id);
11132 resp = hwrm_req_hold(bp, req);
11133 rc = hwrm_req_send(bp, req);
11135 *prod = le32_to_cpu(resp->producer_index);
11136 *cons = le32_to_cpu(resp->consumer_index);
11138 hwrm_req_drop(bp, req);
11142 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11144 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11145 int i = bnapi->index;
11150 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11151 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11155 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11157 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11158 int i = bnapi->index;
11163 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11164 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11165 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11166 rxr->rx_sw_agg_prod);
11169 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11171 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11172 int i = bnapi->index;
11174 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11175 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11178 static void bnxt_dbg_dump_states(struct bnxt *bp)
11181 struct bnxt_napi *bnapi;
11183 for (i = 0; i < bp->cp_nr_rings; i++) {
11184 bnapi = bp->bnapi[i];
11185 if (netif_msg_drv(bp)) {
11186 bnxt_dump_tx_sw_state(bnapi);
11187 bnxt_dump_rx_sw_state(bnapi);
11188 bnxt_dump_cp_sw_state(bnapi);
11193 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11195 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11196 struct hwrm_ring_reset_input *req;
11197 struct bnxt_napi *bnapi = rxr->bnapi;
11198 struct bnxt_cp_ring_info *cpr;
11202 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11206 cpr = &bnapi->cp_ring;
11207 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11208 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11209 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11210 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11211 return hwrm_req_send_silent(bp, req);
11214 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11217 bnxt_dbg_dump_states(bp);
11218 if (netif_running(bp->dev)) {
11222 bnxt_close_nic(bp, false, false);
11223 bnxt_open_nic(bp, false, false);
11226 bnxt_close_nic(bp, true, false);
11227 rc = bnxt_open_nic(bp, true, false);
11228 bnxt_ulp_start(bp, rc);
11233 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11235 struct bnxt *bp = netdev_priv(dev);
11237 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11238 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11239 bnxt_queue_sp_work(bp);
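/* Periodic firmware health check driven by bnxt_timer().  Reads the firmware
 * heartbeat and reset-count registers; if the heartbeat stops advancing or
 * the reset count changes unexpectedly, a BNXT_FW_EXCEPTION_SP_EVENT is
 * scheduled so the slow-path task can start error recovery.
 */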
11242 static void bnxt_fw_health_check(struct bnxt *bp)
11244 struct bnxt_fw_health *fw_health = bp->fw_health;
11247 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11250 if (fw_health->tmr_counter) {
11251 fw_health->tmr_counter--;
11255 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11256 if (val == fw_health->last_fw_heartbeat)
11259 fw_health->last_fw_heartbeat = val;
11261 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11262 if (val != fw_health->last_fw_reset_cnt)
11265 fw_health->tmr_counter = fw_health->tmr_multiplier;
11269 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11270 bnxt_queue_sp_work(bp);
11273 static void bnxt_timer(struct timer_list *t)
11275 struct bnxt *bp = from_timer(bp, t, timer);
11276 struct net_device *dev = bp->dev;
11278 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11281 if (atomic_read(&bp->intr_sem) != 0)
11282 goto bnxt_restart_timer;
11284 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11285 bnxt_fw_health_check(bp);
11287 if (bp->link_info.link_up && bp->stats_coal_ticks) {
11288 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11289 bnxt_queue_sp_work(bp);
11292 if (bnxt_tc_flower_enabled(bp)) {
11293 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11294 bnxt_queue_sp_work(bp);
11297 #ifdef CONFIG_RFS_ACCEL
11298 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11299 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11300 bnxt_queue_sp_work(bp);
11302 #endif /*CONFIG_RFS_ACCEL*/
11304 if (bp->link_info.phy_retry) {
11305 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11306 bp->link_info.phy_retry = false;
11307 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11309 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11310 bnxt_queue_sp_work(bp);
11314 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11315 netif_carrier_ok(dev)) {
11316 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11317 bnxt_queue_sp_work(bp);
11319 bnxt_restart_timer:
11320 mod_timer(&bp->timer, jiffies + bp->current_interval);
11323 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11325 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11326 * set. If the device is being closed, bnxt_close() may be holding
11327 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11328 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11330 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11334 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11336 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11340 /* Only called from bnxt_sp_task() */
11341 static void bnxt_reset(struct bnxt *bp, bool silent)
11343 bnxt_rtnl_lock_sp(bp);
11344 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11345 bnxt_reset_task(bp, silent);
11346 bnxt_rtnl_unlock_sp(bp);
11349 /* Only called from bnxt_sp_task() */
11350 static void bnxt_rx_ring_reset(struct bnxt *bp)
11354 bnxt_rtnl_lock_sp(bp);
11355 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11356 bnxt_rtnl_unlock_sp(bp);
11359 /* Disable and flush TPA before resetting the RX ring */
11360 if (bp->flags & BNXT_FLAG_TPA)
11361 bnxt_set_tpa(bp, false);
11362 for (i = 0; i < bp->rx_nr_rings; i++) {
11363 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11364 struct bnxt_cp_ring_info *cpr;
11367 if (!rxr->bnapi->in_reset)
11370 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11372 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11373 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11375 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11377 bnxt_reset_task(bp, true);
11380 bnxt_free_one_rx_ring_skbs(bp, i);
11382 rxr->rx_agg_prod = 0;
11383 rxr->rx_sw_agg_prod = 0;
11384 rxr->rx_next_cons = 0;
11385 rxr->bnapi->in_reset = false;
11386 bnxt_alloc_one_rx_ring(bp, i);
11387 cpr = &rxr->bnapi->cp_ring;
11388 cpr->sw_stats.rx.rx_resets++;
11389 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11390 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11391 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11393 if (bp->flags & BNXT_FLAG_TPA)
11394 bnxt_set_tpa(bp, true);
11395 bnxt_rtnl_unlock_sp(bp);
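/* Tear the device down in preparation for a firmware reset.  In the fatal
 * error case the queues, NAPI and interrupts are stopped and bus mastering
 * is disabled up front to guard against stray DMA.  The NIC is then closed,
 * the driver unregisters from the firmware, the interrupt mode is cleared
 * and context memory is released.
 */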
11398 static void bnxt_fw_reset_close(struct bnxt *bp)
11401 /* When firmware is in fatal state, quiesce device and disable
11402 * bus master to prevent any potential bad DMAs before freeing kernel memory.
11405 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11408 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11410 bp->fw_reset_min_dsecs = 0;
11411 bnxt_tx_disable(bp);
11412 bnxt_disable_napi(bp);
11413 bnxt_disable_int_sync(bp);
11415 bnxt_clear_int_mode(bp);
11416 pci_disable_device(bp->pdev);
11418 __bnxt_close_nic(bp, true, false);
11419 bnxt_vf_reps_free(bp);
11420 bnxt_clear_int_mode(bp);
11421 bnxt_hwrm_func_drv_unrgtr(bp);
11422 if (pci_is_enabled(bp->pdev))
11423 pci_disable_device(bp->pdev);
11424 bnxt_free_ctx_mem(bp);
11429 static bool is_bnxt_fw_ok(struct bnxt *bp)
11431 struct bnxt_fw_health *fw_health = bp->fw_health;
11432 bool no_heartbeat = false, has_reset = false;
11435 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11436 if (val == fw_health->last_fw_heartbeat)
11437 no_heartbeat = true;
11439 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11440 if (val != fw_health->last_fw_reset_cnt)
11443 if (!no_heartbeat && has_reset)
11449 /* rtnl_lock is acquired before calling this function */
11450 static void bnxt_force_fw_reset(struct bnxt *bp)
11452 struct bnxt_fw_health *fw_health = bp->fw_health;
11453 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11456 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11457 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11461 spin_lock_bh(&ptp->ptp_lock);
11462 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11463 spin_unlock_bh(&ptp->ptp_lock);
11465 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11467 bnxt_fw_reset_close(bp);
11468 wait_dsecs = fw_health->master_func_wait_dsecs;
11469 if (fw_health->master) {
11470 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11472 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11474 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11475 wait_dsecs = fw_health->normal_func_wait_dsecs;
11476 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11479 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11480 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11481 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11484 void bnxt_fw_exception(struct bnxt *bp)
11486 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11487 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11488 bnxt_rtnl_lock_sp(bp);
11489 bnxt_force_fw_reset(bp);
11490 bnxt_rtnl_unlock_sp(bp);
11493 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or a negative error code on failure.
11496 static int bnxt_get_registered_vfs(struct bnxt *bp)
11498 #ifdef CONFIG_BNXT_SRIOV
11504 rc = bnxt_hwrm_func_qcfg(bp);
11506 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11509 if (bp->pf.registered_vfs)
11510 return bp->pf.registered_vfs;
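/* Host-initiated firmware reset: mark BNXT_STATE_IN_FW_RESET (under the PTP
 * lock when PTP is active), poll for registered VFs to unregister when VFs
 * are active, then close the device and hand off to the fw_reset_task state
 * machine.
 */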
11517 void bnxt_fw_reset(struct bnxt *bp)
11519 bnxt_rtnl_lock_sp(bp);
11520 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11521 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11522 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11526 spin_lock_bh(&ptp->ptp_lock);
11527 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11528 spin_unlock_bh(&ptp->ptp_lock);
11530 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11532 if (bp->pf.active_vfs &&
11533 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11534 n = bnxt_get_registered_vfs(bp);
11536 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11538 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11539 dev_close(bp->dev);
11540 goto fw_reset_exit;
11541 } else if (n > 0) {
11542 u16 vf_tmo_dsecs = n * 10;
11544 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11545 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11546 bp->fw_reset_state =
11547 BNXT_FW_RESET_STATE_POLL_VF;
11548 bnxt_queue_fw_reset_work(bp, HZ / 10);
11549 goto fw_reset_exit;
11551 bnxt_fw_reset_close(bp);
11552 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11553 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11556 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11557 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11559 bnxt_queue_fw_reset_work(bp, tmo);
11562 bnxt_rtnl_unlock_sp(bp);
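/* P5 only: look for completion rings that have pending work but whose raw
 * consumer index has not moved since the last check, count them as missed
 * IRQs and dump the ring state via HWRM for debugging.
 */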
11565 static void bnxt_chk_missed_irq(struct bnxt *bp)
11569 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11572 for (i = 0; i < bp->cp_nr_rings; i++) {
11573 struct bnxt_napi *bnapi = bp->bnapi[i];
11574 struct bnxt_cp_ring_info *cpr;
11581 cpr = &bnapi->cp_ring;
11582 for (j = 0; j < 2; j++) {
11583 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11586 if (!cpr2 || cpr2->has_more_work ||
11587 !bnxt_has_work(bp, cpr2))
11590 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11591 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11594 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11595 bnxt_dbg_hwrm_ring_info_get(bp,
11596 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11597 fw_ring_id, &val[0], &val[1]);
11598 cpr->sw_stats.cmn.missed_irqs++;
11603 static void bnxt_cfg_ntp_filters(struct bnxt *);
11605 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11607 struct bnxt_link_info *link_info = &bp->link_info;
11609 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11610 link_info->autoneg = BNXT_AUTONEG_SPEED;
11611 if (bp->hwrm_spec_code >= 0x10201) {
11612 if (link_info->auto_pause_setting &
11613 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11614 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11616 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11618 link_info->advertising = link_info->auto_link_speeds;
11619 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11621 link_info->req_link_speed = link_info->force_link_speed;
11622 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11623 if (link_info->force_pam4_link_speed) {
11624 link_info->req_link_speed =
11625 link_info->force_pam4_link_speed;
11626 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11628 link_info->req_duplex = link_info->duplex_setting;
11630 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11631 link_info->req_flow_ctrl =
11632 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11634 link_info->req_flow_ctrl = link_info->force_pause_setting;
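/* Answer a firmware echo request by sending HWRM_FUNC_ECHO_RESPONSE with
 * the two data words saved from the async event.
 */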
11637 static void bnxt_fw_echo_reply(struct bnxt *bp)
11639 struct bnxt_fw_health *fw_health = bp->fw_health;
11640 struct hwrm_func_echo_response_input *req;
11643 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11646 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11647 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11648 hwrm_req_send(bp, req);
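/* Slow-path work handler.  BNXT_STATE_IN_SP_TASK is held for the duration;
 * each sp_event bit is tested and cleared in turn, and the reset handlers
 * that drop the bit themselves are run last.
 */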
11651 static void bnxt_sp_task(struct work_struct *work)
11653 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11655 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11656 smp_mb__after_atomic();
11657 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11658 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11662 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11663 bnxt_cfg_rx_mode(bp);
11665 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11666 bnxt_cfg_ntp_filters(bp);
11667 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11668 bnxt_hwrm_exec_fwd_req(bp);
11669 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11670 bnxt_hwrm_port_qstats(bp, 0);
11671 bnxt_hwrm_port_qstats_ext(bp, 0);
11672 bnxt_accumulate_all_stats(bp);
11675 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11678 mutex_lock(&bp->link_lock);
11679 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11681 bnxt_hwrm_phy_qcaps(bp);
11683 rc = bnxt_update_link(bp, true);
11685 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11688 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11690 bnxt_init_ethtool_link_settings(bp);
11691 mutex_unlock(&bp->link_lock);
11693 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11696 mutex_lock(&bp->link_lock);
11697 rc = bnxt_update_phy_setting(bp);
11698 mutex_unlock(&bp->link_lock);
11700 netdev_warn(bp->dev, "update phy settings retry failed\n");
11702 bp->link_info.phy_retry = false;
11703 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11706 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11707 mutex_lock(&bp->link_lock);
11708 bnxt_get_port_module_status(bp);
11709 mutex_unlock(&bp->link_lock);
11712 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11713 bnxt_tc_flow_stats_work(bp);
11715 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11716 bnxt_chk_missed_irq(bp);
11718 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11719 bnxt_fw_echo_reply(bp);
11721 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
11722 * must be the last ones called before exiting.
11724 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11725 bnxt_reset(bp, false);
11727 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11728 bnxt_reset(bp, true);
11730 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11731 bnxt_rx_ring_reset(bp);
11733 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11734 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11736 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11737 if (!is_bnxt_fw_ok(bp))
11738 bnxt_devlink_health_report(bp,
11739 BNXT_FW_EXCEPTION_SP_EVENT);
11742 smp_mb__before_atomic();
11743 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11746 /* Under rtnl_lock */
11747 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11750 int max_rx, max_tx, tx_sets = 1;
11751 int tx_rings_needed, stats;
11758 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11765 tx_rings_needed = tx * tx_sets + tx_xdp;
11766 if (max_tx < tx_rings_needed)
11770 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11773 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11775 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11777 if (BNXT_NEW_RM(bp)) {
11778 cp += bnxt_get_ulp_msix_num(bp);
11779 stats += bnxt_get_ulp_stat_ctxs(bp);
11781 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11785 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11788 pci_iounmap(pdev, bp->bar2);
11793 pci_iounmap(pdev, bp->bar1);
11798 pci_iounmap(pdev, bp->bar0);
11803 static void bnxt_cleanup_pci(struct bnxt *bp)
11805 bnxt_unmap_bars(bp, bp->pdev);
11806 pci_release_regions(bp->pdev);
11807 if (pci_is_enabled(bp->pdev))
11808 pci_disable_device(bp->pdev);
11811 static void bnxt_init_dflt_coal(struct bnxt *bp)
11813 struct bnxt_coal *coal;
11815 /* Tick values in microseconds.
11816 * 1 coal_buf x bufs_per_record = 1 completion record.
11818 coal = &bp->rx_coal;
11819 coal->coal_ticks = 10;
11820 coal->coal_bufs = 30;
11821 coal->coal_ticks_irq = 1;
11822 coal->coal_bufs_irq = 2;
11823 coal->idle_thresh = 50;
11824 coal->bufs_per_record = 2;
11825 coal->budget = 64; /* NAPI budget */
11827 coal = &bp->tx_coal;
11828 coal->coal_ticks = 28;
11829 coal->coal_bufs = 30;
11830 coal->coal_ticks_irq = 2;
11831 coal->coal_bufs_irq = 2;
11832 coal->bufs_per_record = 1;
11834 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
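/* Firmware init, phase 1: query the HWRM version (attempting firmware
 * recovery and retrying once on failure), read NVM config versions, reset
 * the function and set the firmware time.
 */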
11837 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11842 rc = bnxt_hwrm_ver_get(bp);
11843 bnxt_try_map_fw_health_reg(bp);
11845 rc = bnxt_try_recover_fw(bp);
11848 rc = bnxt_hwrm_ver_get(bp);
11853 bnxt_nvm_cfg_ver_get(bp);
11855 rc = bnxt_hwrm_func_reset(bp);
11859 bnxt_hwrm_fw_set_time(bp);
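/* Firmware init, phase 2: query function and flow-management capabilities,
 * set up error-recovery health reporting, register the driver with the
 * firmware and collect VNIC, LED and ethtool information.
 */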
11863 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11867 /* Get the MAX capabilities for this function */
11868 rc = bnxt_hwrm_func_qcaps(bp);
11870 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11875 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11877 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11880 if (bnxt_alloc_fw_health(bp)) {
11881 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11883 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11885 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11889 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11893 bnxt_hwrm_func_qcfg(bp);
11894 bnxt_hwrm_vnic_qcaps(bp);
11895 bnxt_hwrm_port_led_qcaps(bp);
11896 bnxt_ethtool_init(bp);
11901 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11903 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11904 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11905 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11906 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11907 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11908 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11909 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11910 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11911 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11915 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11917 struct net_device *dev = bp->dev;
11919 dev->hw_features &= ~NETIF_F_NTUPLE;
11920 dev->features &= ~NETIF_F_NTUPLE;
11921 bp->flags &= ~BNXT_FLAG_RFS;
11922 if (bnxt_rfs_supported(bp)) {
11923 dev->hw_features |= NETIF_F_NTUPLE;
11924 if (bnxt_rfs_capable(bp)) {
11925 bp->flags |= BNXT_FLAG_RFS;
11926 dev->features |= NETIF_F_NTUPLE;
11931 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11933 struct pci_dev *pdev = bp->pdev;
11935 bnxt_set_dflt_rss_hash_type(bp);
11936 bnxt_set_dflt_rfs(bp);
11938 bnxt_get_wol_settings(bp);
11939 if (bp->flags & BNXT_FLAG_WOL_CAP)
11940 device_set_wakeup_enable(&pdev->dev, bp->wol);
11942 device_set_wakeup_capable(&pdev->dev, false);
11944 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11945 bnxt_hwrm_coal_params_qcaps(bp);
11948 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11950 static int bnxt_fw_init_one(struct bnxt *bp)
11954 rc = bnxt_fw_init_one_p1(bp);
11956 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11959 rc = bnxt_fw_init_one_p2(bp);
11961 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11964 rc = bnxt_probe_phy(bp, false);
11967 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11971 /* In case fw capabilities have changed, destroy the unneeded
11972 * reporters and create newly capable ones.
11974 bnxt_dl_fw_reporters_destroy(bp, false);
11975 bnxt_dl_fw_reporters_create(bp);
11976 bnxt_fw_init_one_p3(bp);
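/* Execute one step of the firmware-provided reset sequence: write the value
 * to the register named by its type (config space, GRC window, BAR0 or
 * BAR1), flush with a config read and honour the per-step delay.
 */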
11980 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11982 struct bnxt_fw_health *fw_health = bp->fw_health;
11983 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11984 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11985 u32 reg_type, reg_off, delay_msecs;
11987 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11988 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11989 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11990 switch (reg_type) {
11991 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11992 pci_write_config_dword(bp->pdev, reg_off, val);
11994 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11995 writel(reg_off & BNXT_GRC_BASE_MASK,
11996 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11997 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11999 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12000 writel(val, bp->bar0 + reg_off);
12002 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12003 writel(val, bp->bar1 + reg_off);
12007 pci_read_config_dword(bp->pdev, 0, &val);
12008 msleep(delay_msecs);
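/* Perform the actual firmware reset: via OP-TEE when the firmware supports
 * error-recovery reload, by replaying the host register write sequence, or
 * by sending HWRM_FW_RESET to the co-processor.
 */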
12012 static void bnxt_reset_all(struct bnxt *bp)
12014 struct bnxt_fw_health *fw_health = bp->fw_health;
12017 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12018 bnxt_fw_reset_via_optee(bp);
12019 bp->fw_reset_timestamp = jiffies;
12023 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12024 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12025 bnxt_fw_reset_writel(bp, i);
12026 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12027 struct hwrm_fw_reset_input *req;
12029 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12031 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12032 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12033 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12034 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12035 rc = hwrm_req_send(bp, req);
12038 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12040 bp->fw_reset_timestamp = jiffies;
12043 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12045 return time_after(jiffies, bp->fw_reset_timestamp +
12046 (bp->fw_reset_max_dsecs * HZ / 10));
12049 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12051 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12052 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12053 bnxt_ulp_start(bp, rc);
12054 bnxt_dl_health_status_update(bp, false);
12056 bp->fw_reset_state = 0;
12057 dev_close(bp->dev);
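/* Delayed-work state machine that drives firmware reset recovery through
 * the POLL_VF, POLL_FW_DOWN, RESET_FW, ENABLE_DEV, POLL_FW and OPENING
 * states, re-queueing itself between steps and aborting on timeout.
 */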
12060 static void bnxt_fw_reset_task(struct work_struct *work)
12062 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12065 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12066 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12070 switch (bp->fw_reset_state) {
12071 case BNXT_FW_RESET_STATE_POLL_VF: {
12072 int n = bnxt_get_registered_vfs(bp);
12076 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12077 n, jiffies_to_msecs(jiffies -
12078 bp->fw_reset_timestamp));
12079 goto fw_reset_abort;
12080 } else if (n > 0) {
12081 if (bnxt_fw_reset_timeout(bp)) {
12082 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12083 bp->fw_reset_state = 0;
12084 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12088 bnxt_queue_fw_reset_work(bp, HZ / 10);
12091 bp->fw_reset_timestamp = jiffies;
12093 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12094 bnxt_fw_reset_abort(bp, rc);
12098 bnxt_fw_reset_close(bp);
12099 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12100 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12103 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12104 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12107 bnxt_queue_fw_reset_work(bp, tmo);
12110 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12113 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12114 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12115 !bnxt_fw_reset_timeout(bp)) {
12116 bnxt_queue_fw_reset_work(bp, HZ / 5);
12120 if (!bp->fw_health->master) {
12121 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12123 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12124 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12127 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12130 case BNXT_FW_RESET_STATE_RESET_FW:
12131 bnxt_reset_all(bp);
12132 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12133 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12135 case BNXT_FW_RESET_STATE_ENABLE_DEV:
12136 bnxt_inv_fw_health_reg(bp);
12137 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12138 !bp->fw_reset_min_dsecs) {
12141 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12142 if (val == 0xffff) {
12143 if (bnxt_fw_reset_timeout(bp)) {
12144 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12146 goto fw_reset_abort;
12148 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12152 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12153 if (pci_enable_device(bp->pdev)) {
12154 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12156 goto fw_reset_abort;
12158 pci_set_master(bp->pdev);
12159 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12161 case BNXT_FW_RESET_STATE_POLL_FW:
12162 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12163 rc = bnxt_hwrm_poll(bp);
12165 if (bnxt_fw_reset_timeout(bp)) {
12166 netdev_err(bp->dev, "Firmware reset aborted\n");
12167 goto fw_reset_abort_status;
12169 bnxt_queue_fw_reset_work(bp, HZ / 5);
12172 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12173 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12175 case BNXT_FW_RESET_STATE_OPENING:
12176 while (!rtnl_trylock()) {
12177 bnxt_queue_fw_reset_work(bp, HZ / 10);
12180 rc = bnxt_open(bp->dev);
12182 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12183 bnxt_fw_reset_abort(bp, rc);
12188 bp->fw_reset_state = 0;
12189 /* Make sure fw_reset_state is 0 before clearing the flag */
12190 smp_mb__before_atomic();
12191 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12192 bnxt_ulp_start(bp, 0);
12193 bnxt_reenable_sriov(bp);
12194 bnxt_vf_reps_alloc(bp);
12195 bnxt_vf_reps_open(bp);
12196 bnxt_ptp_reapply_pps(bp);
12197 bnxt_dl_health_recovery_done(bp);
12198 bnxt_dl_health_status_update(bp, true);
12204 fw_reset_abort_status:
12205 if (bp->fw_health->status_reliable ||
12206 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12207 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12209 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12213 bnxt_fw_reset_abort(bp, rc);
12217 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12220 struct bnxt *bp = netdev_priv(dev);
12222 SET_NETDEV_DEV(dev, &pdev->dev);
12224 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12225 rc = pci_enable_device(pdev);
12227 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12231 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12232 dev_err(&pdev->dev,
12233 "Cannot find PCI device base address, aborting\n");
12235 goto init_err_disable;
12238 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12240 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12241 goto init_err_disable;
12244 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12245 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12246 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12248 goto init_err_release;
12251 pci_set_master(pdev);
12256 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12257 * determines the BAR size.
12259 bp->bar0 = pci_ioremap_bar(pdev, 0);
12261 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12263 goto init_err_release;
12266 bp->bar2 = pci_ioremap_bar(pdev, 4);
12268 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12270 goto init_err_release;
12273 pci_enable_pcie_error_reporting(pdev);
12275 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12276 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12278 spin_lock_init(&bp->ntp_fltr_lock);
12279 #if BITS_PER_LONG == 32
12280 spin_lock_init(&bp->db_lock);
12283 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12284 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12286 bnxt_init_dflt_coal(bp);
12288 timer_setup(&bp->timer, bnxt_timer, 0);
12289 bp->current_interval = BNXT_TIMER_INTERVAL;
12291 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12292 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12294 clear_bit(BNXT_STATE_OPEN, &bp->state);
12298 bnxt_unmap_bars(bp, pdev);
12299 pci_release_regions(pdev);
12302 pci_disable_device(pdev);
12308 /* rtnl_lock held */
12309 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12311 struct sockaddr *addr = p;
12312 struct bnxt *bp = netdev_priv(dev);
12315 if (!is_valid_ether_addr(addr->sa_data))
12316 return -EADDRNOTAVAIL;
12318 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12321 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12325 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12326 if (netif_running(dev)) {
12327 bnxt_close_nic(bp, false, false);
12328 rc = bnxt_open_nic(bp, false, false);
12334 /* rtnl_lock held */
12335 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12337 struct bnxt *bp = netdev_priv(dev);
12339 if (netif_running(dev))
12340 bnxt_close_nic(bp, true, false);
12342 dev->mtu = new_mtu;
12343 bnxt_set_ring_params(bp);
12345 if (netif_running(dev))
12346 return bnxt_open_nic(bp, true, false);
12351 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12353 struct bnxt *bp = netdev_priv(dev);
12357 if (tc > bp->max_tc) {
12358 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12363 if (netdev_get_num_tc(dev) == tc)
12366 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12369 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12370 sh, tc, bp->tx_nr_rings_xdp);
12374 /* Needs to close the device and do hw resource re-allocations */
12375 if (netif_running(bp->dev))
12376 bnxt_close_nic(bp, true, false);
12379 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12380 netdev_set_num_tc(dev, tc);
12382 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12383 netdev_reset_tc(dev);
12385 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12386 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12387 bp->tx_nr_rings + bp->rx_nr_rings;
12389 if (netif_running(bp->dev))
12390 return bnxt_open_nic(bp, true, false);
12395 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12398 struct bnxt *bp = cb_priv;
12400 if (!bnxt_tc_flower_enabled(bp) ||
12401 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12402 return -EOPNOTSUPP;
12405 case TC_SETUP_CLSFLOWER:
12406 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12408 return -EOPNOTSUPP;
12412 LIST_HEAD(bnxt_block_cb_list);
12414 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12417 struct bnxt *bp = netdev_priv(dev);
12420 case TC_SETUP_BLOCK:
12421 return flow_block_cb_setup_simple(type_data,
12422 &bnxt_block_cb_list,
12423 bnxt_setup_tc_block_cb,
12425 case TC_SETUP_QDISC_MQPRIO: {
12426 struct tc_mqprio_qopt *mqprio = type_data;
12428 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12430 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12433 return -EOPNOTSUPP;
12437 #ifdef CONFIG_RFS_ACCEL
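/* Two ntuple filters match only if protocol, IP addresses, ports, control
 * flags and both MAC addresses are identical.
 */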
12438 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12439 struct bnxt_ntuple_filter *f2)
12441 struct flow_keys *keys1 = &f1->fkeys;
12442 struct flow_keys *keys2 = &f2->fkeys;
12444 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12445 keys1->basic.ip_proto != keys2->basic.ip_proto)
12448 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12449 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12450 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12453 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12454 sizeof(keys1->addrs.v6addrs.src)) ||
12455 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12456 sizeof(keys1->addrs.v6addrs.dst)))
12460 if (keys1->ports.ports == keys2->ports.ports &&
12461 keys1->control.flags == keys2->control.flags &&
12462 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12463 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12469 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12470 u16 rxq_index, u32 flow_id)
12472 struct bnxt *bp = netdev_priv(dev);
12473 struct bnxt_ntuple_filter *fltr, *new_fltr;
12474 struct flow_keys *fkeys;
12475 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12476 int rc = 0, idx, bit_id, l2_idx = 0;
12477 struct hlist_head *head;
12480 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12481 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12484 netif_addr_lock_bh(dev);
12485 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12486 if (ether_addr_equal(eth->h_dest,
12487 vnic->uc_list + off)) {
12492 netif_addr_unlock_bh(dev);
12496 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12500 fkeys = &new_fltr->fkeys;
12501 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12502 rc = -EPROTONOSUPPORT;
12506 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12507 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12508 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12509 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12510 rc = -EPROTONOSUPPORT;
12513 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12514 bp->hwrm_spec_code < 0x10601) {
12515 rc = -EPROTONOSUPPORT;
12518 flags = fkeys->control.flags;
12519 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12520 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12521 rc = -EPROTONOSUPPORT;
12525 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12526 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12528 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12529 head = &bp->ntp_fltr_hash_tbl[idx];
12531 hlist_for_each_entry_rcu(fltr, head, hash) {
12532 if (bnxt_fltr_match(fltr, new_fltr)) {
12540 spin_lock_bh(&bp->ntp_fltr_lock);
12541 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12542 BNXT_NTP_FLTR_MAX_FLTR, 0);
12544 spin_unlock_bh(&bp->ntp_fltr_lock);
12549 new_fltr->sw_id = (u16)bit_id;
12550 new_fltr->flow_id = flow_id;
12551 new_fltr->l2_fltr_idx = l2_idx;
12552 new_fltr->rxq = rxq_index;
12553 hlist_add_head_rcu(&new_fltr->hash, head);
12554 bp->ntp_fltr_count++;
12555 spin_unlock_bh(&bp->ntp_fltr_lock);
12557 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12558 bnxt_queue_sp_work(bp);
12560 return new_fltr->sw_id;
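/* Walk the ntuple filter hash table: free filters that RPS says may expire,
 * program newly added ones, and delete any filter whose hardware programming
 * failed.
 */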
12567 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12571 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12572 struct hlist_head *head;
12573 struct hlist_node *tmp;
12574 struct bnxt_ntuple_filter *fltr;
12577 head = &bp->ntp_fltr_hash_tbl[i];
12578 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12581 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12582 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12585 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12590 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12595 set_bit(BNXT_FLTR_VALID, &fltr->state);
12599 spin_lock_bh(&bp->ntp_fltr_lock);
12600 hlist_del_rcu(&fltr->hash);
12601 bp->ntp_fltr_count--;
12602 spin_unlock_bh(&bp->ntp_fltr_lock);
12604 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12609 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12610 netdev_info(bp->dev, "Received PF driver unload event!\n");
12615 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12619 #endif /* CONFIG_RFS_ACCEL */
12621 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12623 struct bnxt *bp = netdev_priv(netdev);
12624 struct udp_tunnel_info ti;
12627 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12628 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12629 bp->vxlan_port = ti.port;
12630 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12632 bp->nge_port = ti.port;
12633 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12637 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12639 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12642 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12643 .sync_table = bnxt_udp_tunnel_sync,
12644 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12645 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12647 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12648 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12652 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12653 struct net_device *dev, u32 filter_mask,
12656 struct bnxt *bp = netdev_priv(dev);
12658 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12659 nlflags, filter_mask, NULL);
12662 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12663 u16 flags, struct netlink_ext_ack *extack)
12665 struct bnxt *bp = netdev_priv(dev);
12666 struct nlattr *attr, *br_spec;
12669 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12670 return -EOPNOTSUPP;
12672 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12676 nla_for_each_nested(attr, br_spec, rem) {
12679 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12682 if (nla_len(attr) < sizeof(mode))
12685 mode = nla_get_u16(attr);
12686 if (mode == bp->br_mode)
12689 rc = bnxt_hwrm_set_br_mode(bp, mode);
12691 bp->br_mode = mode;
12697 int bnxt_get_port_parent_id(struct net_device *dev,
12698 struct netdev_phys_item_id *ppid)
12700 struct bnxt *bp = netdev_priv(dev);
12702 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12703 return -EOPNOTSUPP;
12705 /* The PF and its VF-reps only support the switchdev framework */
12706 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12707 return -EOPNOTSUPP;
12709 ppid->id_len = sizeof(bp->dsn);
12710 memcpy(ppid->id, bp->dsn, ppid->id_len);
12715 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12717 struct bnxt *bp = netdev_priv(dev);
12719 return &bp->dl_port;
12722 static const struct net_device_ops bnxt_netdev_ops = {
12723 .ndo_open = bnxt_open,
12724 .ndo_start_xmit = bnxt_start_xmit,
12725 .ndo_stop = bnxt_close,
12726 .ndo_get_stats64 = bnxt_get_stats64,
12727 .ndo_set_rx_mode = bnxt_set_rx_mode,
12728 .ndo_eth_ioctl = bnxt_ioctl,
12729 .ndo_validate_addr = eth_validate_addr,
12730 .ndo_set_mac_address = bnxt_change_mac_addr,
12731 .ndo_change_mtu = bnxt_change_mtu,
12732 .ndo_fix_features = bnxt_fix_features,
12733 .ndo_set_features = bnxt_set_features,
12734 .ndo_features_check = bnxt_features_check,
12735 .ndo_tx_timeout = bnxt_tx_timeout,
12736 #ifdef CONFIG_BNXT_SRIOV
12737 .ndo_get_vf_config = bnxt_get_vf_config,
12738 .ndo_set_vf_mac = bnxt_set_vf_mac,
12739 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12740 .ndo_set_vf_rate = bnxt_set_vf_bw,
12741 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12742 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12743 .ndo_set_vf_trust = bnxt_set_vf_trust,
12745 .ndo_setup_tc = bnxt_setup_tc,
12746 #ifdef CONFIG_RFS_ACCEL
12747 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12749 .ndo_bpf = bnxt_xdp,
12750 .ndo_xdp_xmit = bnxt_xdp_xmit,
12751 .ndo_bridge_getlink = bnxt_bridge_getlink,
12752 .ndo_bridge_setlink = bnxt_bridge_setlink,
12753 .ndo_get_devlink_port = bnxt_get_devlink_port,
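/* Tear-down for device removal: disable SR-IOV, unregister the netdev,
 * flush slow-path and firmware-reset work, release devlink, TC, HWRM and
 * ethtool resources, and free PCI mappings and context memory.
 */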
12756 static void bnxt_remove_one(struct pci_dev *pdev)
12758 struct net_device *dev = pci_get_drvdata(pdev);
12759 struct bnxt *bp = netdev_priv(dev);
12762 bnxt_sriov_disable(bp);
12765 devlink_port_type_clear(&bp->dl_port);
12767 bnxt_ptp_clear(bp);
12768 pci_disable_pcie_error_reporting(pdev);
12769 unregister_netdev(dev);
12770 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12771 /* Flush any pending tasks */
12772 cancel_work_sync(&bp->sp_task);
12773 cancel_delayed_work_sync(&bp->fw_reset_task);
12776 bnxt_dl_fw_reporters_destroy(bp, true);
12777 bnxt_dl_unregister(bp);
12778 bnxt_shutdown_tc(bp);
12780 bnxt_clear_int_mode(bp);
12781 bnxt_hwrm_func_drv_unrgtr(bp);
12782 bnxt_free_hwrm_resources(bp);
12783 bnxt_ethtool_free(bp);
12787 kfree(bp->ptp_cfg);
12788 bp->ptp_cfg = NULL;
12789 kfree(bp->fw_health);
12790 bp->fw_health = NULL;
12791 bnxt_cleanup_pci(bp);
12792 bnxt_free_ctx_mem(bp);
12795 kfree(bp->rss_indir_tbl);
12796 bp->rss_indir_tbl = NULL;
12797 bnxt_free_port_stats(bp);
12801 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12804 struct bnxt_link_info *link_info = &bp->link_info;
12807 rc = bnxt_hwrm_phy_qcaps(bp);
12809 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12813 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12814 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12816 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12820 mutex_lock(&bp->link_lock);
12821 rc = bnxt_update_link(bp, false);
12823 mutex_unlock(&bp->link_lock);
12824 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12829 /* Older firmware does not have supported_auto_speeds, so assume
12830 * that all supported speeds can be autonegotiated.
12832 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12833 link_info->support_auto_speeds = link_info->support_speeds;
12835 bnxt_init_ethtool_link_settings(bp);
12836 mutex_unlock(&bp->link_lock);
12840 static int bnxt_get_max_irq(struct pci_dev *pdev)
12844 if (!pdev->msix_cap)
12847 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12848 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12851 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12854 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12855 int max_ring_grps = 0, max_irq;
12857 *max_tx = hw_resc->max_tx_rings;
12858 *max_rx = hw_resc->max_rx_rings;
12859 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12860 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12861 bnxt_get_ulp_msix_num(bp),
12862 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12863 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12864 *max_cp = min_t(int, *max_cp, max_irq);
12865 max_ring_grps = hw_resc->max_hw_ring_grps;
12866 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12870 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12872 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12873 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12874 /* On P5 chips, the max_cp output parameter is the number of available NQs */
12877 *max_rx = min_t(int, *max_rx, max_ring_grps);
12880 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12884 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12887 if (!rx || !tx || !cp)
12890 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12893 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12898 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12899 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12900 /* Not enough rings, try disabling agg rings. */
12901 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12902 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12904 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12905 bp->flags |= BNXT_FLAG_AGG_RINGS;
12908 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12909 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12910 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12911 bnxt_set_ring_params(bp);
12914 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12915 int max_cp, max_stat, max_irq;
12917 /* Reserve minimum resources for RoCE */
12918 max_cp = bnxt_get_max_func_cp_rings(bp);
12919 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12920 max_irq = bnxt_get_max_func_irqs(bp);
12921 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12922 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12923 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12926 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12927 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12928 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12929 max_cp = min_t(int, max_cp, max_irq);
12930 max_cp = min_t(int, max_cp, max_stat);
12931 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12938 /* In initial default shared ring setting, each shared ring must have a RX/TX ring pair.
12941 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12943 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12944 bp->rx_nr_rings = bp->cp_nr_rings;
12945 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12946 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
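/* Choose the initial default ring counts: start from the default RSS queue
 * count (one in a kdump kernel, capped per port on multi-port cards), trim
 * to what the firmware can reserve, and warn if reservation falls short.
 */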
12949 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12951 int dflt_rings, max_rx_rings, max_tx_rings, rc;
12953 if (!bnxt_can_reserve_rings(bp))
12957 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12958 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12959 /* Reduce default rings on multi-port cards so that total default
12960 * rings do not exceed CPU count.
12962 if (bp->port_count > 1) {
12964 max_t(int, num_online_cpus() / bp->port_count, 1);
12966 dflt_rings = min_t(int, dflt_rings, max_rings);
12968 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12971 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12972 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12974 bnxt_trim_dflt_sh_rings(bp);
12976 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12977 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12979 rc = __bnxt_reserve_rings(bp);
12981 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12982 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12984 bnxt_trim_dflt_sh_rings(bp);
12986 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12987 if (bnxt_need_reserve_rings(bp)) {
12988 rc = __bnxt_reserve_rings(bp);
12990 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12991 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12993 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12998 bp->tx_nr_rings = 0;
12999 bp->rx_nr_rings = 0;
13004 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13008 if (bp->tx_nr_rings)
13011 bnxt_ulp_irq_stop(bp);
13012 bnxt_clear_int_mode(bp);
13013 rc = bnxt_set_dflt_rings(bp, true);
13015 netdev_err(bp->dev, "Not enough rings available.\n");
13016 goto init_dflt_ring_err;
13018 rc = bnxt_init_int_mode(bp);
13020 goto init_dflt_ring_err;
13022 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13023 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13024 bp->flags |= BNXT_FLAG_RFS;
13025 bp->dev->features |= NETIF_F_NTUPLE;
13027 init_dflt_ring_err:
13028 bnxt_ulp_irq_restart(bp, rc);
13032 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13037 bnxt_hwrm_func_qcaps(bp);
13039 if (netif_running(bp->dev))
13040 __bnxt_close_nic(bp, true, false);
13042 bnxt_ulp_irq_stop(bp);
13043 bnxt_clear_int_mode(bp);
13044 rc = bnxt_init_int_mode(bp);
13045 bnxt_ulp_irq_restart(bp, rc);
13047 if (netif_running(bp->dev)) {
13049 dev_close(bp->dev);
13051 rc = bnxt_open_nic(bp, true, false);
13057 static int bnxt_init_mac_addr(struct bnxt *bp)
13062 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13064 #ifdef CONFIG_BNXT_SRIOV
13065 struct bnxt_vf_info *vf = &bp->vf;
13066 bool strict_approval = true;
13068 if (is_valid_ether_addr(vf->mac_addr)) {
13069 /* overwrite netdev dev_addr with admin VF MAC */
13070 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13071 /* Older PF driver or firmware may not approve this correctly.
13074 strict_approval = false;
13076 eth_hw_addr_random(bp->dev);
13078 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13084 #define BNXT_VPD_LEN 512
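/* Read up to BNXT_VPD_LEN bytes of PCI VPD, locate the read-only section
 * and copy the board part number and serial number strings into bp.
 */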
13085 static void bnxt_vpd_read_info(struct bnxt *bp)
13087 struct pci_dev *pdev = bp->pdev;
13088 int i, len, pos, ro_size, size;
13092 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13096 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13097 if (vpd_size <= 0) {
13098 netdev_err(bp->dev, "Unable to read VPD\n");
13102 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
13104 netdev_err(bp->dev, "VPD READ-Only not found\n");
13114 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13115 i += PCI_VPD_LRDT_TAG_SIZE;
13116 if (i + ro_size > vpd_size)
13119 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13120 PCI_VPD_RO_KEYWORD_PARTNO);
13124 len = pci_vpd_info_field_size(&vpd_data[pos]);
13125 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13126 if (len + pos > vpd_size)
13129 size = min(len, BNXT_VPD_FLD_LEN - 1);
13130 memcpy(bp->board_partno, &vpd_data[pos], size);
13133 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13134 PCI_VPD_RO_KEYWORD_SERIALNO);
13138 len = pci_vpd_info_field_size(&vpd_data[pos]);
13139 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13140 if (len + pos > vpd_size)
13143 size = min(len, BNXT_VPD_FLD_LEN - 1);
13144 memcpy(bp->board_serialno, &vpd_data[pos], size);
13149 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13151 struct pci_dev *pdev = bp->pdev;
13154 qword = pci_get_dsn(pdev);
13156 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13157 return -EOPNOTSUPP;
13160 put_unaligned_le64(qword, dsn);
13162 bp->flags |= BNXT_FLAG_DSN_VALID;
13166 static int bnxt_map_db_bar(struct bnxt *bp)
13170 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
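/* Main PCI probe entry point: set up the netdev and board, run firmware
 * init phases 1-3, map the doorbell BAR, configure netdev features,
 * reserve default rings and finally register the net device.
 */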
13176 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13178 struct net_device *dev;
13182 if (pci_is_bridge(pdev))
13185 /* Clear any pending DMA transactions from crash kernel
13186 * while loading driver in capture kernel.
13188 if (is_kdump_kernel()) {
13189 pci_clear_master(pdev);
13193 max_irqs = bnxt_get_max_irq(pdev);
13194 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13198 bp = netdev_priv(dev);
13199 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13200 bnxt_set_max_func_irqs(bp, max_irqs);
13202 if (bnxt_vf_pciid(ent->driver_data))
13203 bp->flags |= BNXT_FLAG_VF;
13205 if (pdev->msix_cap)
13206 bp->flags |= BNXT_FLAG_MSIX_CAP;
13208 rc = bnxt_init_board(pdev, dev);
13210 goto init_err_free;
13212 dev->netdev_ops = &bnxt_netdev_ops;
13213 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13214 dev->ethtool_ops = &bnxt_ethtool_ops;
13215 pci_set_drvdata(pdev, dev);
13217 rc = bnxt_alloc_hwrm_resources(bp);
13219 goto init_err_pci_clean;
13221 mutex_init(&bp->hwrm_cmd_lock);
13222 mutex_init(&bp->link_lock);
13224 rc = bnxt_fw_init_one_p1(bp);
13226 goto init_err_pci_clean;
13229 bnxt_vpd_read_info(bp);
13231 if (BNXT_CHIP_P5(bp)) {
13232 bp->flags |= BNXT_FLAG_CHIP_P5;
13233 if (BNXT_CHIP_SR2(bp))
13234 bp->flags |= BNXT_FLAG_CHIP_SR2;
13237 rc = bnxt_alloc_rss_indir_tbl(bp);
13239 goto init_err_pci_clean;
13241 rc = bnxt_fw_init_one_p2(bp);
13243 goto init_err_pci_clean;
13245 rc = bnxt_map_db_bar(bp);
13247 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13249 goto init_err_pci_clean;
13252 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13253 NETIF_F_TSO | NETIF_F_TSO6 |
13254 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13255 NETIF_F_GSO_IPXIP4 |
13256 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13257 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13258 NETIF_F_RXCSUM | NETIF_F_GRO;
13260 if (BNXT_SUPPORTS_TPA(bp))
13261 dev->hw_features |= NETIF_F_LRO;
13263 dev->hw_enc_features =
13264 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13265 NETIF_F_TSO | NETIF_F_TSO6 |
13266 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13267 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13268 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13269 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13271 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13272 NETIF_F_GSO_GRE_CSUM;
13273 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13274 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13275 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13276 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13277 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13278 if (BNXT_SUPPORTS_TPA(bp))
13279 dev->hw_features |= NETIF_F_GRO_HW;
13280 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13281 if (dev->features & NETIF_F_GRO_HW)
13282 dev->features &= ~NETIF_F_LRO;
13283 dev->priv_flags |= IFF_UNICAST_FLT;
13285 #ifdef CONFIG_BNXT_SRIOV
13286 init_waitqueue_head(&bp->sriov_cfg_wait);
13287 mutex_init(&bp->sriov_lock);
13289 if (BNXT_SUPPORTS_TPA(bp)) {
13290 bp->gro_func = bnxt_gro_func_5730x;
13291 if (BNXT_CHIP_P4(bp))
13292 bp->gro_func = bnxt_gro_func_5731x;
13293 else if (BNXT_CHIP_P5(bp))
13294 bp->gro_func = bnxt_gro_func_5750x;
13296 if (!BNXT_CHIP_P4_PLUS(bp))
13297 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13299 rc = bnxt_init_mac_addr(bp);
13301 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13302 rc = -EADDRNOTAVAIL;
13303 goto init_err_pci_clean;
13307 /* Read the adapter's DSN to use as the eswitch switch_id */
13308 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13311 /* MTU range: 60 - FW defined max */
13312 dev->min_mtu = ETH_ZLEN;
13313 dev->max_mtu = bp->max_mtu;
13315 rc = bnxt_probe_phy(bp, true);
13317 goto init_err_pci_clean;
13319 bnxt_set_rx_skb_mode(bp, false);
13320 bnxt_set_tpa_flags(bp);
13321 bnxt_set_ring_params(bp);
13322 rc = bnxt_set_dflt_rings(bp, true);
13324 netdev_err(bp->dev, "Not enough rings available.\n");
13326 goto init_err_pci_clean;
13329 bnxt_fw_init_one_p3(bp);
13331 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13332 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13334 rc = bnxt_init_int_mode(bp);
13336 goto init_err_pci_clean;
13338 /* No TC has been set yet and rings may have been trimmed due to
13339 * limited MSIX, so we re-initialize the TX rings per TC.
13341 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13346 create_singlethread_workqueue("bnxt_pf_wq");
13348 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13350 goto init_err_pci_clean;
13353 rc = bnxt_init_tc(bp);
13355 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13359 bnxt_inv_fw_health_reg(bp);
13360 bnxt_dl_register(bp);
13362 rc = register_netdev(dev);
13364 goto init_err_cleanup;
13367 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13368 bnxt_dl_fw_reporters_create(bp);
13370 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13371 board_info[ent->driver_data].name,
13372 (long)pci_resource_start(pdev, 0), dev->dev_addr);
13373 pcie_print_link_status(pdev);
13375 pci_save_state(pdev);
13379 bnxt_dl_unregister(bp);
13380 bnxt_shutdown_tc(bp);
13381 bnxt_clear_int_mode(bp);
13383 init_err_pci_clean:
13384 bnxt_hwrm_func_drv_unrgtr(bp);
13385 bnxt_free_hwrm_resources(bp);
13386 bnxt_ethtool_free(bp);
13387 bnxt_ptp_clear(bp);
13388 kfree(bp->ptp_cfg);
13389 bp->ptp_cfg = NULL;
13390 kfree(bp->fw_health);
13391 bp->fw_health = NULL;
13392 bnxt_cleanup_pci(bp);
13393 bnxt_free_ctx_mem(bp);
13396 kfree(bp->rss_indir_tbl);
13397 bp->rss_indir_tbl = NULL;
13404 static void bnxt_shutdown(struct pci_dev *pdev)
13406 struct net_device *dev = pci_get_drvdata(pdev);
13413 bp = netdev_priv(dev);
13415 goto shutdown_exit;
13417 if (netif_running(dev))
13420 bnxt_ulp_shutdown(bp);
13421 bnxt_clear_int_mode(bp);
13422 pci_disable_device(pdev);
13424 if (system_state == SYSTEM_POWER_OFF) {
13425 pci_wake_from_d3(pdev, bp->wol);
13426 pci_set_power_state(pdev, PCI_D3hot);
13433 #ifdef CONFIG_PM_SLEEP
13434 static int bnxt_suspend(struct device *device)
13436 struct net_device *dev = dev_get_drvdata(device);
13437 struct bnxt *bp = netdev_priv(dev);
13442 if (netif_running(dev)) {
13443 netif_device_detach(dev);
13444 rc = bnxt_close(dev);
13446 bnxt_hwrm_func_drv_unrgtr(bp);
13447 pci_disable_device(bp->pdev);
13448 bnxt_free_ctx_mem(bp);
13455 static int bnxt_resume(struct device *device)
13457 struct net_device *dev = dev_get_drvdata(device);
13458 struct bnxt *bp = netdev_priv(dev);
13462 rc = pci_enable_device(bp->pdev);
13464 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13468 pci_set_master(bp->pdev);
13469 if (bnxt_hwrm_ver_get(bp)) {
13473 rc = bnxt_hwrm_func_reset(bp);
13479 rc = bnxt_hwrm_func_qcaps(bp);
13483 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13488 bnxt_get_wol_settings(bp);
13489 if (netif_running(dev)) {
13490 rc = bnxt_open(dev);
13492 netif_device_attach(dev);
13496 bnxt_ulp_start(bp, rc);
13498 bnxt_reenable_sriov(bp);
13503 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13504 #define BNXT_PM_OPS (&bnxt_pm_ops)
13508 #define BNXT_PM_OPS NULL
13510 #endif /* CONFIG_PM_SLEEP */
13513 * bnxt_io_error_detected - called when PCI error is detected
13514 * @pdev: Pointer to PCI device
13515 * @state: The current pci connection state
13517 * This function is called after a PCI bus error affecting
13518 * this device has been detected.
13520 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13521 pci_channel_state_t state)
13523 struct net_device *netdev = pci_get_drvdata(pdev);
13524 struct bnxt *bp = netdev_priv(netdev);
13526 netdev_info(netdev, "PCI I/O error detected\n");
13529 netif_device_detach(netdev);
13533 if (state == pci_channel_io_perm_failure) {
13535 return PCI_ERS_RESULT_DISCONNECT;
13538 if (state == pci_channel_io_frozen)
13539 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13541 if (netif_running(netdev))
13542 bnxt_close(netdev);
13544 if (pci_is_enabled(pdev))
13545 pci_disable_device(pdev);
13546 bnxt_free_ctx_mem(bp);
13551 /* Request a slot reset. */
13552 return PCI_ERS_RESULT_NEED_RESET;
13556 * bnxt_io_slot_reset - called after the pci bus has been reset.
13557 * @pdev: Pointer to PCI device
13559 * Restart the card from scratch, as if from a cold-boot.
13560 * At this point, the card has experienced a hard reset,
13561 * followed by fixups by BIOS, and has its config space
13562 * set up identically to what it was at cold boot.
13564 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13566 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13567 struct net_device *netdev = pci_get_drvdata(pdev);
13568 struct bnxt *bp = netdev_priv(netdev);
13571 netdev_info(bp->dev, "PCI Slot Reset\n");
13575 if (pci_enable_device(pdev)) {
13576 dev_err(&pdev->dev,
13577 "Cannot re-enable PCI device after reset.\n");
13579 pci_set_master(pdev);
13580 /* Upon a fatal error, the device's internal logic that latches the
13581 * BAR values is reset and is restored only when the BARs are
13582 * rewritten.
13584 * Since pci_restore_state() does not rewrite a BAR whose value
13585 * matches the previously saved value, the driver must write the
13586 * BARs to 0 to force a restore after a fatal error.
13588 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13590 for (off = PCI_BASE_ADDRESS_0;
13591 off <= PCI_BASE_ADDRESS_5; off += 4)
13592 pci_write_config_dword(bp->pdev, off, 0);
13594 pci_restore_state(pdev);
13595 pci_save_state(pdev);
13597 err = bnxt_hwrm_func_reset(bp);
13599 result = PCI_ERS_RESULT_RECOVERED;
13608 * bnxt_io_resume - called when traffic can start flowing again.
13609 * @pdev: Pointer to PCI device
13611 * This callback is called when the error recovery driver tells
13612 * us that it's OK to resume normal operation.
13614 static void bnxt_io_resume(struct pci_dev *pdev)
13616 struct net_device *netdev = pci_get_drvdata(pdev);
13617 struct bnxt *bp = netdev_priv(netdev);
13620 netdev_info(bp->dev, "PCI Slot Resume\n");
13623 err = bnxt_hwrm_func_qcaps(bp);
13624 if (!err && netif_running(netdev))
13625 err = bnxt_open(netdev);
13627 bnxt_ulp_start(bp, err);
13629 bnxt_reenable_sriov(bp);
13630 netif_device_attach(netdev);
13636 static const struct pci_error_handlers bnxt_err_handler = {
13637 .error_detected = bnxt_io_error_detected,
13638 .slot_reset = bnxt_io_slot_reset,
13639 .resume = bnxt_io_resume
13642 static struct pci_driver bnxt_pci_driver = {
13643 .name = DRV_MODULE_NAME,
13644 .id_table = bnxt_pci_tbl,
13645 .probe = bnxt_init_one,
13646 .remove = bnxt_remove_one,
13647 .shutdown = bnxt_shutdown,
13648 .driver.pm = BNXT_PM_OPS,
13649 .err_handler = &bnxt_err_handler,
13650 #if defined(CONFIG_BNXT_SRIOV)
13651 .sriov_configure = bnxt_sriov_configure,
13655 static int __init bnxt_init(void)
13658 return pci_register_driver(&bnxt_pci_driver);
13661 static void __exit bnxt_exit(void)
13663 pci_unregister_driver(&bnxt_pci_driver);
13665 destroy_workqueue(bnxt_pf_wq);
13669 module_init(bnxt_init);
13670 module_exit(bnxt_exit);