/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164
/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};
static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};

static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
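
/* Doorbell helpers: each writes a ring's consumer index to its doorbell
 * register.  The *_ARM variants also re-arm the interrupt for the ring,
 * while BNXT_CP_DB_IRQ_DIS masks it.  The NQ (notification queue) forms
 * use 64-bit doorbells on P5 chips.
 */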
#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}
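
/* TX BD length hint, indexed by the packet length in 512-byte units
 * (presumably used by the chip to size its DMA read of the packet).
 */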
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
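
/* Main transmit routine.  Small packets on an otherwise idle ring may be
 * pushed inline through the doorbell window; everything else is
 * DMA-mapped and posted as a long TX BD plus one BD per fragment.
 */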
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;
	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;
	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}
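
	/* If the ring is completely idle and the whole packet fits under
	 * the push threshold, write the BDs and the packet data directly
	 * into the doorbell window instead of DMA-mapping the skb.
	 */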
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}
normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;
tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
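
/* Reclaim completed TX descriptors: unmap the DMA buffers, free the skbs,
 * and wake the TX queue if it was stopped and enough room has opened up.
 */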
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
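
/* Allocate an RX buffer page from the ring's page_pool and DMA-map it,
 * returning the page with *mapping adjusted by the RX DMA offset.
 */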
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}
static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}
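
/* Recycle aggregation-ring buffers back to the producer ring when a
 * packet is dropped or aborted, so that no agg buffers are leaked.
 */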
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (err) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}
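
/* Build an skb around an RX buffer in normal (non page-mode) operation;
 * the buffer itself becomes the skb data area via build_skb().
 */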
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (err) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}
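
/* Copy a small received packet into a freshly allocated skb so that the
 * original RX buffer can be recycled immediately.
 */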
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);
	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}
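
/* Handle a TPA_START completion: park the current RX buffer in tpa_info
 * for the duration of the aggregation and give the producer ring a
 * replacement buffer.
 */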
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}
static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
	return skb;
}
static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}

	return skb;
}
#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);

	return skb;
}
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);

	return skb;
}
/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
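
/* Handle a TPA_END completion: assemble the aggregated packet, attach
 * any aggregation buffers, and hand it to GRO if enabled.
 */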
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (gro)
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}
static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnxt_tpa_info *tpa_info;

	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];
	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
			    cons, rxr->rx_next_cons);
		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);

		rc = -EIO;
		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
			bnapi->cp_ring.rx_buf_errors++;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
				netdev_warn(bp->dev, "RX buffer error %x\n",
					    rx_err);
				bnxt_sched_reset(bp, rxr);
			}
		}
		goto next_rx_no_len;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			if (agg_bufs)
				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
						       agg_bufs, false);
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_len:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
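
/* Read a firmware health register, which may live in PCI config space,
 * GRC (mapped), or one of the BARs depending on the register type.
 */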
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		reg_off = fw_health->mapped_regs[reg_idx];
		/* fall through */
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl(bp->bar0 + reg_off);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl(bp->bar1 + reg_off);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event IDs for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
	}
	/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
		/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);

		if (!bp->fw_health)
			goto async_event_process_exit;

		bp->fw_reset_timestamp = jiffies;
		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
		if (!bp->fw_reset_min_dsecs)
			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
		if (!bp->fw_reset_max_dsecs)
			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			netdev_warn(bp->dev, "Firmware fatal reset event received\n");
			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		} else {
			netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
				    bp->fw_reset_max_dsecs * 100);
		}
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		struct bnxt_fw_health *fw_health = bp->fw_health;
		u32 data1 = le32_to_cpu(cmpl->event_data1);

		if (!fw_health)
			goto async_event_process_exit;

		fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		if (!fw_health->enabled)
			break;

		if (netif_msg_drv(bp))
			netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
				    fw_health->enabled, fw_health->master,
				    bnxt_fw_health_readl(bp,
							 BNXT_FW_RESET_CNT_REG),
				    bnxt_fw_health_readl(bp,
							 BNXT_FW_HEALTH_REG));
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     bp->current_interval * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		fw_health->last_fw_heartbeat =
			bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		goto async_event_process_exit;
	}
	default:
		goto async_event_process_exit;
	}
	bnxt_queue_sp_work(bp);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
				(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

	default:
		break;
	}

	return 0;
}
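
/* MSI-X interrupt handler: one vector per completion ring.  Prefetch the
 * next completion entry and hand off to NAPI.
 */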
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	cpr->event_ctr++;
	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}
2128 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2130 struct bnxt_napi *bnapi = dev_instance;
2131 struct bnxt *bp = bnapi->bp;
2132 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2133 u32 cons = RING_CMP(cpr->cp_raw_cons);
2136 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2138 if (!bnxt_has_work(bp, cpr)) {
2139 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2140 /* return if erroneous interrupt */
2141 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2145 /* disable ring IRQ */
2146 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2148 /* Return here if interrupt is shared and is disabled. */
2149 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2150 return IRQ_HANDLED;
2152 napi_schedule(&bnapi->napi);
2156 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2159 struct bnxt_napi *bnapi = cpr->bnapi;
2160 u32 raw_cons = cpr->cp_raw_cons;
2165 struct tx_cmp *txcmp;
2167 cpr->has_more_work = 0;
2171 cons = RING_CMP(raw_cons);
2172 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2174 if (!TX_CMP_VALID(txcmp, raw_cons))
2177 /* The valid test of the entry must be done first before
2178 * reading any further.
2179 */
2180 dma_rmb();
2181 cpr->had_work_done = 1;
2182 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2184 /* return full budget so NAPI will complete. */
2185 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2187 raw_cons = NEXT_RAW_CMP(raw_cons);
2189 cpr->has_more_work = 1;
2192 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2193 if (likely(budget))
2194 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2195 else
2196 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2197 &event);
2198 if (likely(rc >= 0))
2199 rx_pkts += rc;
2200 /* Increment rx_pkts when rc is -ENOMEM to count towards
2201 * the NAPI budget. Otherwise, we may potentially loop
2202 * here forever if we consistently cannot allocate
2203 * buffers.
2204 */
2205 else if (rc == -ENOMEM && budget)
2206 rx_pkts++;
2207 else if (rc == -EBUSY) /* partial completion */
2208 break;
2209 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2210 CMPL_BASE_TYPE_HWRM_DONE) ||
2211 (TX_CMP_TYPE(txcmp) ==
2212 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2213 (TX_CMP_TYPE(txcmp) ==
2214 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2215 bnxt_hwrm_handler(bp, txcmp);
2217 raw_cons = NEXT_RAW_CMP(raw_cons);
2219 if (rx_pkts && rx_pkts == budget) {
2220 cpr->has_more_work = 1;
2225 if (event & BNXT_REDIRECT_EVENT)
2228 if (event & BNXT_TX_EVENT) {
2229 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2230 u16 prod = txr->tx_prod;
2232 /* Sync BD data before updating doorbell */
2233 wmb();
2235 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2238 cpr->cp_raw_cons = raw_cons;
2239 bnapi->tx_pkts += tx_pkts;
2240 bnapi->events |= event;
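/* Note that __bnxt_poll_work() only accumulates tx_pkts and event flags
 * on the bnxt_napi here; the TX reclaim (tx_int) and the RX/AGG producer
 * doorbells are issued later in __bnxt_poll_work_done(), after the
 * completion ring consumer has been ACKed (see bnxt_poll_work() below).
 */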
2244 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2246 if (bnapi->tx_pkts) {
2247 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2251 if (bnapi->events & BNXT_RX_EVENT) {
2252 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2254 if (bnapi->events & BNXT_AGG_EVENT)
2255 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2256 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2261 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2264 struct bnxt_napi *bnapi = cpr->bnapi;
2267 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2269 /* ACK completion ring before freeing tx ring and producing new
2270 * buffers in rx/agg rings to prevent overflowing the completion
2271 * ring.
2272 */
2273 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2275 __bnxt_poll_work_done(bp, bnapi);
2279 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2281 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2282 struct bnxt *bp = bnapi->bp;
2283 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2284 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2285 struct tx_cmp *txcmp;
2286 struct rx_cmp_ext *rxcmp1;
2287 u32 cp_cons, tmp_raw_cons;
2288 u32 raw_cons = cpr->cp_raw_cons;
2295 cp_cons = RING_CMP(raw_cons);
2296 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2298 if (!TX_CMP_VALID(txcmp, raw_cons))
2301 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2302 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2303 cp_cons = RING_CMP(tmp_raw_cons);
2304 rxcmp1 = (struct rx_cmp_ext *)
2305 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2307 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2310 /* force an error to recycle the buffer */
2311 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2312 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2314 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2315 if (likely(rc == -EIO) && budget)
2316 rx_pkts++;
2317 else if (rc == -EBUSY) /* partial completion */
2318 break;
2319 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2320 CMPL_BASE_TYPE_HWRM_DONE)) {
2321 bnxt_hwrm_handler(bp, txcmp);
2324 "Invalid completion received on special ring\n");
2326 raw_cons = NEXT_RAW_CMP(raw_cons);
2328 if (rx_pkts == budget)
2332 cpr->cp_raw_cons = raw_cons;
2333 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2334 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2336 if (event & BNXT_AGG_EVENT)
2337 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2339 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2340 napi_complete_done(napi, rx_pkts);
2341 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2346 static int bnxt_poll(struct napi_struct *napi, int budget)
2348 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2349 struct bnxt *bp = bnapi->bp;
2350 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2354 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2356 if (work_done >= budget) {
2358 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2362 if (!bnxt_has_work(bp, cpr)) {
2363 if (napi_complete_done(napi, work_done))
2364 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2368 if (bp->flags & BNXT_FLAG_DIM) {
2369 struct dim_sample dim_sample = {};
2371 dim_update_sample(cpr->event_ctr,
2375 net_dim(&cpr->dim, dim_sample);
2380 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2382 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2383 int i, work_done = 0;
2385 for (i = 0; i < 2; i++) {
2386 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2389 work_done += __bnxt_poll_work(bp, cpr2,
2390 budget - work_done);
2391 cpr->has_more_work |= cpr2->has_more_work;
2397 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2398 u64 dbr_type, bool all)
2400 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2403 for (i = 0; i < 2; i++) {
2404 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2405 struct bnxt_db_info *db;
2407 if (cpr2 && (all || cpr2->had_work_done)) {
2409 writeq(db->db_key64 | dbr_type |
2410 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2411 cpr2->had_work_done = 0;
2414 __bnxt_poll_work_done(bp, bnapi);
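/* On P5 chips each MSI-X vector services a notification queue (NQ)
 * whose entries point at up to two child completion rings,
 * cp_ring_arr[BNXT_RX_HDL] and cp_ring_arr[BNXT_TX_HDL] (see
 * bnxt_alloc_cp_rings() below).  bnxt_poll_p5() drains the NQ, polls
 * whichever child CQ each notification names, and re-arms through the
 * CQ/NQ doorbells written above.
 */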
2417 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2419 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2420 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2421 u32 raw_cons = cpr->cp_raw_cons;
2422 struct bnxt *bp = bnapi->bp;
2423 struct nqe_cn *nqcmp;
2427 if (cpr->has_more_work) {
2428 cpr->has_more_work = 0;
2429 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2430 if (cpr->has_more_work) {
2431 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2434 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2435 if (napi_complete_done(napi, work_done))
2436 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2440 cons = RING_CMP(raw_cons);
2441 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2443 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2444 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2446 cpr->cp_raw_cons = raw_cons;
2447 if (napi_complete_done(napi, work_done))
2448 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2453 /* The valid test of the entry must be done first before
2454 * reading any further.
2455 */
2456 dma_rmb();
2458 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2459 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2460 struct bnxt_cp_ring_info *cpr2;
2462 cpr2 = cpr->cp_ring_arr[idx];
2463 work_done += __bnxt_poll_work(bp, cpr2,
2464 budget - work_done);
2465 cpr->has_more_work = cpr2->has_more_work;
2467 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2469 raw_cons = NEXT_RAW_CMP(raw_cons);
2470 if (cpr->has_more_work)
2473 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2474 cpr->cp_raw_cons = raw_cons;
2478 static void bnxt_free_tx_skbs(struct bnxt *bp)
2481 struct pci_dev *pdev = bp->pdev;
2486 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2487 for (i = 0; i < bp->tx_nr_rings; i++) {
2488 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2491 for (j = 0; j < max_idx;) {
2492 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2493 struct sk_buff *skb;
2496 if (i < bp->tx_nr_rings_xdp &&
2497 tx_buf->action == XDP_REDIRECT) {
2498 dma_unmap_single(&pdev->dev,
2499 dma_unmap_addr(tx_buf, mapping),
2500 dma_unmap_len(tx_buf, len),
2501 PCI_DMA_TODEVICE);
2502 xdp_return_frame(tx_buf->xdpf);
2504 tx_buf->xdpf = NULL;
2517 if (tx_buf->is_push) {
2523 dma_unmap_single(&pdev->dev,
2524 dma_unmap_addr(tx_buf, mapping),
2525 skb_headlen(skb),
2526 PCI_DMA_TODEVICE);
2528 last = tx_buf->nr_frags;
2530 for (k = 0; k < last; k++, j++) {
2531 int ring_idx = j & bp->tx_ring_mask;
2532 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2534 tx_buf = &txr->tx_buf_ring[ring_idx];
2536 dma_unmap_page(&pdev->dev,
2537 dma_unmap_addr(tx_buf, mapping),
2538 skb_frag_size(frag), PCI_DMA_TODEVICE);
2542 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2546 static void bnxt_free_rx_skbs(struct bnxt *bp)
2548 int i, max_idx, max_agg_idx;
2549 struct pci_dev *pdev = bp->pdev;
2554 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2555 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2556 for (i = 0; i < bp->rx_nr_rings; i++) {
2557 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2558 struct bnxt_tpa_idx_map *map;
2562 for (j = 0; j < bp->max_tpa; j++) {
2563 struct bnxt_tpa_info *tpa_info =
2564 &rxr->rx_tpa[j];
2565 u8 *data = tpa_info->data;
2570 dma_unmap_single_attrs(&pdev->dev,
2571 tpa_info->mapping,
2572 bp->rx_buf_use_size,
2573 bp->rx_dir,
2574 DMA_ATTR_WEAK_ORDERING);
2576 tpa_info->data = NULL;
2582 for (j = 0; j < max_idx; j++) {
2583 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2584 dma_addr_t mapping = rx_buf->mapping;
2585 void *data = rx_buf->data;
2590 rx_buf->data = NULL;
2592 if (BNXT_RX_PAGE_MODE(bp)) {
2593 mapping -= bp->rx_dma_offset;
2594 dma_unmap_page_attrs(&pdev->dev, mapping,
2595 PAGE_SIZE, bp->rx_dir,
2596 DMA_ATTR_WEAK_ORDERING);
2597 page_pool_recycle_direct(rxr->page_pool, data);
2599 dma_unmap_single_attrs(&pdev->dev, mapping,
2600 bp->rx_buf_use_size,
2601 bp->rx_dir,
2602 DMA_ATTR_WEAK_ORDERING);
2607 for (j = 0; j < max_agg_idx; j++) {
2608 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2609 &rxr->rx_agg_ring[j];
2610 struct page *page = rx_agg_buf->page;
2615 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2616 BNXT_RX_PAGE_SIZE,
2617 PCI_DMA_FROMDEVICE,
2618 DMA_ATTR_WEAK_ORDERING);
2620 rx_agg_buf->page = NULL;
2621 __clear_bit(j, rxr->rx_agg_bmap);
2626 __free_page(rxr->rx_page);
2627 rxr->rx_page = NULL;
2629 map = rxr->rx_tpa_idx_map;
2631 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2635 static void bnxt_free_skbs(struct bnxt *bp)
2637 bnxt_free_tx_skbs(bp);
2638 bnxt_free_rx_skbs(bp);
2641 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2643 struct pci_dev *pdev = bp->pdev;
2646 for (i = 0; i < rmem->nr_pages; i++) {
2647 if (!rmem->pg_arr[i])
2650 dma_free_coherent(&pdev->dev, rmem->page_size,
2651 rmem->pg_arr[i], rmem->dma_arr[i]);
2653 rmem->pg_arr[i] = NULL;
2656 size_t pg_tbl_size = rmem->nr_pages * 8;
2658 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2659 pg_tbl_size = rmem->page_size;
2660 dma_free_coherent(&pdev->dev, pg_tbl_size,
2661 rmem->pg_tbl, rmem->pg_tbl_map);
2662 rmem->pg_tbl = NULL;
2664 if (rmem->vmem_size && *rmem->vmem) {
2670 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2672 struct pci_dev *pdev = bp->pdev;
2676 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2677 valid_bit = PTU_PTE_VALID;
2678 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2679 size_t pg_tbl_size = rmem->nr_pages * 8;
2681 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2682 pg_tbl_size = rmem->page_size;
2683 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2684 &rmem->pg_tbl_map,
2685 GFP_KERNEL);
2690 for (i = 0; i < rmem->nr_pages; i++) {
2691 u64 extra_bits = valid_bit;
2693 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2694 rmem->page_size,
2695 &rmem->dma_arr[i],
2696 GFP_KERNEL);
2697 if (!rmem->pg_arr[i])
2698 return -ENOMEM;
2700 if (rmem->init_val)
2701 memset(rmem->pg_arr[i], rmem->init_val,
2702 rmem->page_size);
2703 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2704 if (i == rmem->nr_pages - 2 &&
2705 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2706 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2707 else if (i == rmem->nr_pages - 1 &&
2708 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2709 extra_bits |= PTU_PTE_LAST;
2710 rmem->pg_tbl[i] =
2711 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
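/* For rings backed by a page table, each PTE is the page's DMA address
 * plus PTU_PTE_VALID, with the final two entries additionally tagged
 * NEXT_TO_LAST/LAST so the hardware can tell where the ring wraps.
 */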
2715 if (rmem->vmem_size) {
2716 *rmem->vmem = vzalloc(rmem->vmem_size);
2723 static void bnxt_free_tpa_info(struct bnxt *bp)
2727 for (i = 0; i < bp->rx_nr_rings; i++) {
2728 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2730 kfree(rxr->rx_tpa_idx_map);
2731 rxr->rx_tpa_idx_map = NULL;
2733 kfree(rxr->rx_tpa[0].agg_arr);
2734 rxr->rx_tpa[0].agg_arr = NULL;
2741 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2743 int i, j, total_aggs = 0;
2745 bp->max_tpa = MAX_TPA;
2746 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2747 if (!bp->max_tpa_v2)
2749 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2750 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2753 for (i = 0; i < bp->rx_nr_rings; i++) {
2754 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2755 struct rx_agg_cmp *agg;
2757 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2758 GFP_KERNEL);
2759 if (!rxr->rx_tpa)
2760 return -ENOMEM;
2762 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2764 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2765 rxr->rx_tpa[0].agg_arr = agg;
2766 if (!agg)
2767 return -ENOMEM;
2768 for (j = 1; j < bp->max_tpa; j++)
2769 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
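/* On P5 chips a single array of max_tpa * MAX_SKB_FRAGS aggregation
 * completions is allocated per RX ring and carved into per-TPA-slot
 * chunks of MAX_SKB_FRAGS entries; slot 0 keeps the base pointer so
 * bnxt_free_tpa_info() can free the whole array in one kfree().
 */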
2770 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2772 if (!rxr->rx_tpa_idx_map)
2778 static void bnxt_free_rx_rings(struct bnxt *bp)
2785 bnxt_free_tpa_info(bp);
2786 for (i = 0; i < bp->rx_nr_rings; i++) {
2787 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2788 struct bnxt_ring_struct *ring;
2791 bpf_prog_put(rxr->xdp_prog);
2793 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2794 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2796 page_pool_destroy(rxr->page_pool);
2797 rxr->page_pool = NULL;
2799 kfree(rxr->rx_agg_bmap);
2800 rxr->rx_agg_bmap = NULL;
2802 ring = &rxr->rx_ring_struct;
2803 bnxt_free_ring(bp, &ring->ring_mem);
2805 ring = &rxr->rx_agg_ring_struct;
2806 bnxt_free_ring(bp, &ring->ring_mem);
2810 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2811 struct bnxt_rx_ring_info *rxr)
2813 struct page_pool_params pp = { 0 };
2815 pp.pool_size = bp->rx_ring_size;
2816 pp.nid = dev_to_node(&bp->pdev->dev);
2817 pp.dev = &bp->pdev->dev;
2818 pp.dma_dir = DMA_BIDIRECTIONAL;
2820 rxr->page_pool = page_pool_create(&pp);
2821 if (IS_ERR(rxr->page_pool)) {
2822 int err = PTR_ERR(rxr->page_pool);
2824 rxr->page_pool = NULL;
2830 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2832 int i, rc = 0, agg_rings = 0;
2837 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2840 for (i = 0; i < bp->rx_nr_rings; i++) {
2841 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2842 struct bnxt_ring_struct *ring;
2844 ring = &rxr->rx_ring_struct;
2846 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2850 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2854 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2855 MEM_TYPE_PAGE_POOL,
2856 rxr->page_pool);
2857 if (rc) {
2858 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2862 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2870 ring = &rxr->rx_agg_ring_struct;
2871 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2876 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2877 mem_size = rxr->rx_agg_bmap_size / 8;
2878 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2879 if (!rxr->rx_agg_bmap)
2883 if (bp->flags & BNXT_FLAG_TPA)
2884 rc = bnxt_alloc_tpa_info(bp);
2888 static void bnxt_free_tx_rings(struct bnxt *bp)
2891 struct pci_dev *pdev = bp->pdev;
2896 for (i = 0; i < bp->tx_nr_rings; i++) {
2897 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2898 struct bnxt_ring_struct *ring;
2901 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2902 txr->tx_push, txr->tx_push_mapping);
2903 txr->tx_push = NULL;
2906 ring = &txr->tx_ring_struct;
2908 bnxt_free_ring(bp, &ring->ring_mem);
2912 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2915 struct pci_dev *pdev = bp->pdev;
2917 bp->tx_push_size = 0;
2918 if (bp->tx_push_thresh) {
2921 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2922 bp->tx_push_thresh);
2924 if (push_size > 256) {
2926 bp->tx_push_thresh = 0;
2929 bp->tx_push_size = push_size;
2932 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2933 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2934 struct bnxt_ring_struct *ring;
2937 ring = &txr->tx_ring_struct;
2939 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2943 ring->grp_idx = txr->bnapi->index;
2944 if (bp->tx_push_size) {
2947 /* One pre-allocated DMA buffer to backup
2948 * TX push operation
2949 */
2950 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2951 bp->tx_push_size,
2952 &txr->tx_push_mapping,
2953 GFP_KERNEL);
2958 mapping = txr->tx_push_mapping +
2959 sizeof(struct tx_push_bd);
2960 txr->data_mapping = cpu_to_le64(mapping);
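/* The push buffer is laid out as a struct tx_push_bd header followed by
 * the packet bytes; data_mapping is the DMA address of the data area
 * just past that header, which the TX path can hand to the hardware
 * when it pushes small packets inline.
 */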
2962 qidx = bp->tc_to_qidx[j];
2963 ring->queue_id = bp->q_info[qidx].queue_id;
2964 if (i < bp->tx_nr_rings_xdp)
2966 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2972 static void bnxt_free_cp_rings(struct bnxt *bp)
2979 for (i = 0; i < bp->cp_nr_rings; i++) {
2980 struct bnxt_napi *bnapi = bp->bnapi[i];
2981 struct bnxt_cp_ring_info *cpr;
2982 struct bnxt_ring_struct *ring;
2988 cpr = &bnapi->cp_ring;
2989 ring = &cpr->cp_ring_struct;
2991 bnxt_free_ring(bp, &ring->ring_mem);
2993 for (j = 0; j < 2; j++) {
2994 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2997 ring = &cpr2->cp_ring_struct;
2998 bnxt_free_ring(bp, &ring->ring_mem);
3000 cpr->cp_ring_arr[j] = NULL;
3006 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3008 struct bnxt_ring_mem_info *rmem;
3009 struct bnxt_ring_struct *ring;
3010 struct bnxt_cp_ring_info *cpr;
3013 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3017 ring = &cpr->cp_ring_struct;
3018 rmem = &ring->ring_mem;
3019 rmem->nr_pages = bp->cp_nr_pages;
3020 rmem->page_size = HW_CMPD_RING_SIZE;
3021 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3022 rmem->dma_arr = cpr->cp_desc_mapping;
3023 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3024 rc = bnxt_alloc_ring(bp, rmem);
3026 bnxt_free_ring(bp, rmem);
3033 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3035 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3036 int i, rc, ulp_base_vec, ulp_msix;
3038 ulp_msix = bnxt_get_ulp_msix_num(bp);
3039 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3040 for (i = 0; i < bp->cp_nr_rings; i++) {
3041 struct bnxt_napi *bnapi = bp->bnapi[i];
3042 struct bnxt_cp_ring_info *cpr;
3043 struct bnxt_ring_struct *ring;
3048 cpr = &bnapi->cp_ring;
3050 ring = &cpr->cp_ring_struct;
3052 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3056 if (ulp_msix && i >= ulp_base_vec)
3057 ring->map_idx = i + ulp_msix;
3061 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3064 if (i < bp->rx_nr_rings) {
3065 struct bnxt_cp_ring_info *cpr2 =
3066 bnxt_alloc_cp_sub_ring(bp);
3068 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3071 cpr2->bnapi = bnapi;
3073 if ((sh && i < bp->tx_nr_rings) ||
3074 (!sh && i >= bp->rx_nr_rings)) {
3075 struct bnxt_cp_ring_info *cpr2 =
3076 bnxt_alloc_cp_sub_ring(bp);
3078 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3081 cpr2->bnapi = bnapi;
3087 static void bnxt_init_ring_struct(struct bnxt *bp)
3091 for (i = 0; i < bp->cp_nr_rings; i++) {
3092 struct bnxt_napi *bnapi = bp->bnapi[i];
3093 struct bnxt_ring_mem_info *rmem;
3094 struct bnxt_cp_ring_info *cpr;
3095 struct bnxt_rx_ring_info *rxr;
3096 struct bnxt_tx_ring_info *txr;
3097 struct bnxt_ring_struct *ring;
3102 cpr = &bnapi->cp_ring;
3103 ring = &cpr->cp_ring_struct;
3104 rmem = &ring->ring_mem;
3105 rmem->nr_pages = bp->cp_nr_pages;
3106 rmem->page_size = HW_CMPD_RING_SIZE;
3107 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3108 rmem->dma_arr = cpr->cp_desc_mapping;
3109 rmem->vmem_size = 0;
3111 rxr = bnapi->rx_ring;
3115 ring = &rxr->rx_ring_struct;
3116 rmem = &ring->ring_mem;
3117 rmem->nr_pages = bp->rx_nr_pages;
3118 rmem->page_size = HW_RXBD_RING_SIZE;
3119 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3120 rmem->dma_arr = rxr->rx_desc_mapping;
3121 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3122 rmem->vmem = (void **)&rxr->rx_buf_ring;
3124 ring = &rxr->rx_agg_ring_struct;
3125 rmem = &ring->ring_mem;
3126 rmem->nr_pages = bp->rx_agg_nr_pages;
3127 rmem->page_size = HW_RXBD_RING_SIZE;
3128 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3129 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3130 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3131 rmem->vmem = (void **)&rxr->rx_agg_ring;
3134 txr = bnapi->tx_ring;
3138 ring = &txr->tx_ring_struct;
3139 rmem = &ring->ring_mem;
3140 rmem->nr_pages = bp->tx_nr_pages;
3141 rmem->page_size = HW_RXBD_RING_SIZE;
3142 rmem->pg_arr = (void **)txr->tx_desc_ring;
3143 rmem->dma_arr = txr->tx_desc_mapping;
3144 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3145 rmem->vmem = (void **)&txr->tx_buf_ring;
3149 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3153 struct rx_bd **rx_buf_ring;
3155 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3156 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3160 rxbd = rx_buf_ring[i];
3164 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3165 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3166 rxbd->rx_bd_opaque = prod;
3171 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3173 struct net_device *dev = bp->dev;
3174 struct bnxt_rx_ring_info *rxr;
3175 struct bnxt_ring_struct *ring;
3179 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3180 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3182 if (NET_IP_ALIGN == 2)
3183 type |= RX_BD_FLAGS_SOP;
3185 rxr = &bp->rx_ring[ring_nr];
3186 ring = &rxr->rx_ring_struct;
3187 bnxt_init_rxbd_pages(ring, type);
3189 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3190 bpf_prog_add(bp->xdp_prog, 1);
3191 rxr->xdp_prog = bp->xdp_prog;
3193 prod = rxr->rx_prod;
3194 for (i = 0; i < bp->rx_ring_size; i++) {
3195 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3196 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3197 ring_nr, i, bp->rx_ring_size);
3200 prod = NEXT_RX(prod);
3202 rxr->rx_prod = prod;
3203 ring->fw_ring_id = INVALID_HW_RING_ID;
3205 ring = &rxr->rx_agg_ring_struct;
3206 ring->fw_ring_id = INVALID_HW_RING_ID;
3208 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3211 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3212 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3214 bnxt_init_rxbd_pages(ring, type);
3216 prod = rxr->rx_agg_prod;
3217 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3218 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3219 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3220 ring_nr, i, bp->rx_agg_ring_size);
3223 prod = NEXT_RX_AGG(prod);
3225 rxr->rx_agg_prod = prod;
3227 if (bp->flags & BNXT_FLAG_TPA) {
3232 for (i = 0; i < bp->max_tpa; i++) {
3233 data = __bnxt_alloc_rx_data(bp, &mapping,
3238 rxr->rx_tpa[i].data = data;
3239 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3240 rxr->rx_tpa[i].mapping = mapping;
3243 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3251 static void bnxt_init_cp_rings(struct bnxt *bp)
3255 for (i = 0; i < bp->cp_nr_rings; i++) {
3256 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3257 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3259 ring->fw_ring_id = INVALID_HW_RING_ID;
3260 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3261 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3262 for (j = 0; j < 2; j++) {
3263 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3268 ring = &cpr2->cp_ring_struct;
3269 ring->fw_ring_id = INVALID_HW_RING_ID;
3270 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3271 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3276 static int bnxt_init_rx_rings(struct bnxt *bp)
3280 if (BNXT_RX_PAGE_MODE(bp)) {
3281 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3282 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3284 bp->rx_offset = BNXT_RX_OFFSET;
3285 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3288 for (i = 0; i < bp->rx_nr_rings; i++) {
3289 rc = bnxt_init_one_rx_ring(bp, i);
3297 static int bnxt_init_tx_rings(struct bnxt *bp)
3301 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3304 for (i = 0; i < bp->tx_nr_rings; i++) {
3305 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3306 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3308 ring->fw_ring_id = INVALID_HW_RING_ID;
3314 static void bnxt_free_ring_grps(struct bnxt *bp)
3316 kfree(bp->grp_info);
3317 bp->grp_info = NULL;
3320 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3325 bp->grp_info = kcalloc(bp->cp_nr_rings,
3326 sizeof(struct bnxt_ring_grp_info),
3331 for (i = 0; i < bp->cp_nr_rings; i++) {
3333 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3334 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3335 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3336 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3337 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3342 static void bnxt_free_vnics(struct bnxt *bp)
3344 kfree(bp->vnic_info);
3345 bp->vnic_info = NULL;
3349 static int bnxt_alloc_vnics(struct bnxt *bp)
3353 #ifdef CONFIG_RFS_ACCEL
3354 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3355 num_vnics += bp->rx_nr_rings;
3358 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3361 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3366 bp->nr_vnics = num_vnics;
3370 static void bnxt_init_vnics(struct bnxt *bp)
3374 for (i = 0; i < bp->nr_vnics; i++) {
3375 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3378 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3379 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3380 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3382 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3384 if (bp->vnic_info[i].rss_hash_key) {
3385 if (i == 0)
3386 prandom_bytes(vnic->rss_hash_key,
3387 HW_HASH_KEY_SIZE);
3388 else
3389 memcpy(vnic->rss_hash_key,
3390 bp->vnic_info[0].rss_hash_key,
3391 HW_HASH_KEY_SIZE);
3396 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3400 pages = ring_size / desc_per_pg;
3407 while (pages & (pages - 1))
3408 pages++;
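/* (pages & (pages - 1)) is non-zero whenever pages is not a power of
 * two, so the loop bumps the count up to the next power of two (e.g.
 * 3 -> 4, 5 -> 8).  The ring masks computed in bnxt_set_ring_params()
 * depend on the resulting descriptor counts being powers of two.
 */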
3413 void bnxt_set_tpa_flags(struct bnxt *bp)
3415 bp->flags &= ~BNXT_FLAG_TPA;
3416 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3418 if (bp->dev->features & NETIF_F_LRO)
3419 bp->flags |= BNXT_FLAG_LRO;
3420 else if (bp->dev->features & NETIF_F_GRO_HW)
3421 bp->flags |= BNXT_FLAG_GRO;
3424 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3425 * be set on entry.
3426 */
3427 void bnxt_set_ring_params(struct bnxt *bp)
3429 u32 ring_size, rx_size, rx_space;
3430 u32 agg_factor = 0, agg_ring_size = 0;
3432 /* 8 for CRC and VLAN */
3433 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3435 rx_space = rx_size + NET_SKB_PAD +
3436 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3438 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3439 ring_size = bp->rx_ring_size;
3440 bp->rx_agg_ring_size = 0;
3441 bp->rx_agg_nr_pages = 0;
3443 if (bp->flags & BNXT_FLAG_TPA)
3444 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
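/* A rough budget for aggregation buffers per packet: one TPA aggregation
 * can span up to 64K of payload, i.e. 65536 / BNXT_RX_PAGE_SIZE pages,
 * capped at 4 here; the agg ring is then sized at agg_factor times the
 * rx ring below.
 */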
3446 bp->flags &= ~BNXT_FLAG_JUMBO;
3447 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3450 bp->flags |= BNXT_FLAG_JUMBO;
3451 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3452 if (jumbo_factor > agg_factor)
3453 agg_factor = jumbo_factor;
3455 agg_ring_size = ring_size * agg_factor;
3457 if (agg_ring_size) {
3458 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3460 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3461 u32 tmp = agg_ring_size;
3463 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3464 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3465 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3466 tmp, agg_ring_size);
3468 bp->rx_agg_ring_size = agg_ring_size;
3469 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3470 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3471 rx_space = rx_size + NET_SKB_PAD +
3472 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3475 bp->rx_buf_use_size = rx_size;
3476 bp->rx_buf_size = rx_space;
3478 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3479 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3481 ring_size = bp->tx_ring_size;
3482 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3483 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3485 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
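/* Each RX packet consumes a two-entry completion (rx_cmp plus
 * rx_cmp_ext) and up to agg_factor aggregation completions, and each TX
 * packet one completion, so the completion ring is sized for the worst
 * case of rx * (2 + agg_factor) + tx outstanding entries.
 */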
3486 bp->cp_ring_size = ring_size;
3488 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3489 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3490 bp->cp_nr_pages = MAX_CP_PAGES;
3491 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3492 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3493 ring_size, bp->cp_ring_size);
3495 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3496 bp->cp_ring_mask = bp->cp_bit - 1;
3499 /* Changing allocation mode of RX rings.
3500 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3501 */
3502 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3505 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3508 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3509 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3510 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3511 bp->rx_dir = DMA_BIDIRECTIONAL;
3512 bp->rx_skb_func = bnxt_rx_page_skb;
3513 /* Disable LRO or GRO_HW */
3514 netdev_update_features(bp->dev);
3516 bp->dev->max_mtu = bp->max_mtu;
3517 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3518 bp->rx_dir = DMA_FROM_DEVICE;
3519 bp->rx_skb_func = bnxt_rx_skb;
3524 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3527 struct bnxt_vnic_info *vnic;
3528 struct pci_dev *pdev = bp->pdev;
3533 for (i = 0; i < bp->nr_vnics; i++) {
3534 vnic = &bp->vnic_info[i];
3536 kfree(vnic->fw_grp_ids);
3537 vnic->fw_grp_ids = NULL;
3539 kfree(vnic->uc_list);
3540 vnic->uc_list = NULL;
3542 if (vnic->mc_list) {
3543 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3544 vnic->mc_list, vnic->mc_list_mapping);
3545 vnic->mc_list = NULL;
3548 if (vnic->rss_table) {
3549 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3550 vnic->rss_table,
3551 vnic->rss_table_dma_addr);
3552 vnic->rss_table = NULL;
3555 vnic->rss_hash_key = NULL;
3560 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3562 int i, rc = 0, size;
3563 struct bnxt_vnic_info *vnic;
3564 struct pci_dev *pdev = bp->pdev;
3567 for (i = 0; i < bp->nr_vnics; i++) {
3568 vnic = &bp->vnic_info[i];
3570 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3571 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3574 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3575 if (!vnic->uc_list) {
3582 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3583 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3584 vnic->mc_list =
3585 dma_alloc_coherent(&pdev->dev,
3586 vnic->mc_list_size,
3587 &vnic->mc_list_mapping,
3588 GFP_KERNEL);
3589 if (!vnic->mc_list) {
3595 if (bp->flags & BNXT_FLAG_CHIP_P5)
3596 goto vnic_skip_grps;
3598 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3599 max_rings = bp->rx_nr_rings;
3603 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3604 if (!vnic->fw_grp_ids) {
3609 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3610 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3613 /* Allocate rss table and hash key */
3614 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3615 &vnic->rss_table_dma_addr,
3617 if (!vnic->rss_table) {
3622 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3624 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3625 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3633 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3635 struct pci_dev *pdev = bp->pdev;
3637 if (bp->hwrm_cmd_resp_addr) {
3638 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3639 bp->hwrm_cmd_resp_dma_addr);
3640 bp->hwrm_cmd_resp_addr = NULL;
3643 if (bp->hwrm_cmd_kong_resp_addr) {
3644 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3645 bp->hwrm_cmd_kong_resp_addr,
3646 bp->hwrm_cmd_kong_resp_dma_addr);
3647 bp->hwrm_cmd_kong_resp_addr = NULL;
3651 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3653 struct pci_dev *pdev = bp->pdev;
3655 if (bp->hwrm_cmd_kong_resp_addr)
3658 bp->hwrm_cmd_kong_resp_addr =
3659 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3660 &bp->hwrm_cmd_kong_resp_dma_addr,
3662 if (!bp->hwrm_cmd_kong_resp_addr)
3668 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3670 struct pci_dev *pdev = bp->pdev;
3672 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3673 &bp->hwrm_cmd_resp_dma_addr,
3675 if (!bp->hwrm_cmd_resp_addr)
3681 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3683 if (bp->hwrm_short_cmd_req_addr) {
3684 struct pci_dev *pdev = bp->pdev;
3686 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3687 bp->hwrm_short_cmd_req_addr,
3688 bp->hwrm_short_cmd_req_dma_addr);
3689 bp->hwrm_short_cmd_req_addr = NULL;
3693 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3695 struct pci_dev *pdev = bp->pdev;
3697 if (bp->hwrm_short_cmd_req_addr)
3700 bp->hwrm_short_cmd_req_addr =
3701 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3702 &bp->hwrm_short_cmd_req_dma_addr,
3704 if (!bp->hwrm_short_cmd_req_addr)
3710 static void bnxt_free_port_stats(struct bnxt *bp)
3712 struct pci_dev *pdev = bp->pdev;
3714 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3715 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3717 if (bp->hw_rx_port_stats) {
3718 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3719 bp->hw_rx_port_stats,
3720 bp->hw_rx_port_stats_map);
3721 bp->hw_rx_port_stats = NULL;
3724 if (bp->hw_tx_port_stats_ext) {
3725 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3726 bp->hw_tx_port_stats_ext,
3727 bp->hw_tx_port_stats_ext_map);
3728 bp->hw_tx_port_stats_ext = NULL;
3731 if (bp->hw_rx_port_stats_ext) {
3732 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3733 bp->hw_rx_port_stats_ext,
3734 bp->hw_rx_port_stats_ext_map);
3735 bp->hw_rx_port_stats_ext = NULL;
3738 if (bp->hw_pcie_stats) {
3739 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3740 bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3741 bp->hw_pcie_stats = NULL;
3745 static void bnxt_free_ring_stats(struct bnxt *bp)
3747 struct pci_dev *pdev = bp->pdev;
3753 size = bp->hw_ring_stats_size;
3755 for (i = 0; i < bp->cp_nr_rings; i++) {
3756 struct bnxt_napi *bnapi = bp->bnapi[i];
3757 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3759 if (cpr->hw_stats) {
3760 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3761 cpr->hw_stats_map);
3762 cpr->hw_stats = NULL;
3767 static int bnxt_alloc_stats(struct bnxt *bp)
3770 struct pci_dev *pdev = bp->pdev;
3772 size = bp->hw_ring_stats_size;
3774 for (i = 0; i < bp->cp_nr_rings; i++) {
3775 struct bnxt_napi *bnapi = bp->bnapi[i];
3776 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3778 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3779 &cpr->hw_stats_map,
3780 GFP_KERNEL);
3781 if (!cpr->hw_stats)
3782 return -ENOMEM;
3784 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3787 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3788 return 0;
3790 if (bp->hw_rx_port_stats)
3791 goto alloc_ext_stats;
3793 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3794 sizeof(struct tx_port_stats) + 1024;
3796 bp->hw_rx_port_stats =
3797 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3798 &bp->hw_rx_port_stats_map,
3800 if (!bp->hw_rx_port_stats)
3803 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3804 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3805 sizeof(struct rx_port_stats) + 512;
3806 bp->flags |= BNXT_FLAG_PORT_STATS;
3809 /* Display extended statistics only if FW supports it */
3810 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3811 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3812 return 0;
3813 alloc_ext_stats:
3814 if (bp->hw_rx_port_stats_ext)
3815 goto alloc_tx_ext_stats;
3817 bp->hw_rx_port_stats_ext =
3818 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3819 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3820 if (!bp->hw_rx_port_stats_ext)
3821 return 0;
3823 alloc_tx_ext_stats:
3824 if (bp->hw_tx_port_stats_ext)
3825 goto alloc_pcie_stats;
3827 if (bp->hwrm_spec_code >= 0x10902 ||
3828 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3829 bp->hw_tx_port_stats_ext =
3830 dma_alloc_coherent(&pdev->dev,
3831 sizeof(struct tx_port_stats_ext),
3832 &bp->hw_tx_port_stats_ext_map,
3835 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3837 alloc_pcie_stats:
3838 if (bp->hw_pcie_stats ||
3839 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3842 bp->hw_pcie_stats =
3843 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3844 &bp->hw_pcie_stats_map, GFP_KERNEL);
3845 if (!bp->hw_pcie_stats)
3848 bp->flags |= BNXT_FLAG_PCIE_STATS;
3852 static void bnxt_clear_ring_indices(struct bnxt *bp)
3859 for (i = 0; i < bp->cp_nr_rings; i++) {
3860 struct bnxt_napi *bnapi = bp->bnapi[i];
3861 struct bnxt_cp_ring_info *cpr;
3862 struct bnxt_rx_ring_info *rxr;
3863 struct bnxt_tx_ring_info *txr;
3868 cpr = &bnapi->cp_ring;
3869 cpr->cp_raw_cons = 0;
3871 txr = bnapi->tx_ring;
3877 rxr = bnapi->rx_ring;
3880 rxr->rx_agg_prod = 0;
3881 rxr->rx_sw_agg_prod = 0;
3882 rxr->rx_next_cons = 0;
3887 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3889 #ifdef CONFIG_RFS_ACCEL
3892 /* We are under rtnl_lock and all our NAPIs have been disabled, so it
3893 * is safe to delete the hash table.
3894 */
3895 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3896 struct hlist_head *head;
3897 struct hlist_node *tmp;
3898 struct bnxt_ntuple_filter *fltr;
3900 head = &bp->ntp_fltr_hash_tbl[i];
3901 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3902 hlist_del(&fltr->hash);
3907 kfree(bp->ntp_fltr_bmap);
3908 bp->ntp_fltr_bmap = NULL;
3910 bp->ntp_fltr_count = 0;
3914 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3916 #ifdef CONFIG_RFS_ACCEL
3919 if (!(bp->flags & BNXT_FLAG_RFS))
3922 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3923 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3925 bp->ntp_fltr_count = 0;
3926 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3927 sizeof(long),
3928 GFP_KERNEL);
3930 if (!bp->ntp_fltr_bmap)
3939 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3941 bnxt_free_vnic_attributes(bp);
3942 bnxt_free_tx_rings(bp);
3943 bnxt_free_rx_rings(bp);
3944 bnxt_free_cp_rings(bp);
3945 bnxt_free_ntp_fltrs(bp, irq_re_init);
3947 bnxt_free_ring_stats(bp);
3948 bnxt_free_ring_grps(bp);
3949 bnxt_free_vnics(bp);
3950 kfree(bp->tx_ring_map);
3951 bp->tx_ring_map = NULL;
3959 bnxt_clear_ring_indices(bp);
3963 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3965 int i, j, rc, size, arr_size;
3969 /* Allocate bnapi mem pointer array and mem block for
3972 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3974 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3975 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3981 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3982 bp->bnapi[i] = bnapi;
3983 bp->bnapi[i]->index = i;
3984 bp->bnapi[i]->bp = bp;
3985 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3986 struct bnxt_cp_ring_info *cpr =
3987 &bp->bnapi[i]->cp_ring;
3989 cpr->cp_ring_struct.ring_mem.flags =
3990 BNXT_RMEM_RING_PTE_FLAG;
3994 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3995 sizeof(struct bnxt_rx_ring_info),
4000 for (i = 0; i < bp->rx_nr_rings; i++) {
4001 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4003 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4004 rxr->rx_ring_struct.ring_mem.flags =
4005 BNXT_RMEM_RING_PTE_FLAG;
4006 rxr->rx_agg_ring_struct.ring_mem.flags =
4007 BNXT_RMEM_RING_PTE_FLAG;
4009 rxr->bnapi = bp->bnapi[i];
4010 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4013 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4014 sizeof(struct bnxt_tx_ring_info),
4019 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4022 if (!bp->tx_ring_map)
4025 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4028 j = bp->rx_nr_rings;
4030 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4031 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4033 if (bp->flags & BNXT_FLAG_CHIP_P5)
4034 txr->tx_ring_struct.ring_mem.flags =
4035 BNXT_RMEM_RING_PTE_FLAG;
4036 txr->bnapi = bp->bnapi[j];
4037 bp->bnapi[j]->tx_ring = txr;
4038 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4039 if (i >= bp->tx_nr_rings_xdp) {
4040 txr->txq_index = i - bp->tx_nr_rings_xdp;
4041 bp->bnapi[j]->tx_int = bnxt_tx_int;
4042 } else {
4043 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4044 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4048 rc = bnxt_alloc_stats(bp);
4052 rc = bnxt_alloc_ntp_fltrs(bp);
4056 rc = bnxt_alloc_vnics(bp);
4061 bnxt_init_ring_struct(bp);
4063 rc = bnxt_alloc_rx_rings(bp);
4067 rc = bnxt_alloc_tx_rings(bp);
4071 rc = bnxt_alloc_cp_rings(bp);
4075 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4076 BNXT_VNIC_UCAST_FLAG;
4077 rc = bnxt_alloc_vnic_attributes(bp);
4083 bnxt_free_mem(bp, true);
4087 static void bnxt_disable_int(struct bnxt *bp)
4094 for (i = 0; i < bp->cp_nr_rings; i++) {
4095 struct bnxt_napi *bnapi = bp->bnapi[i];
4096 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4097 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4099 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4100 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4104 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4106 struct bnxt_napi *bnapi = bp->bnapi[n];
4107 struct bnxt_cp_ring_info *cpr;
4109 cpr = &bnapi->cp_ring;
4110 return cpr->cp_ring_struct.map_idx;
4113 static void bnxt_disable_int_sync(struct bnxt *bp)
4117 atomic_inc(&bp->intr_sem);
4119 bnxt_disable_int(bp);
4120 for (i = 0; i < bp->cp_nr_rings; i++) {
4121 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4123 synchronize_irq(bp->irq_tbl[map_idx].vector);
4127 static void bnxt_enable_int(struct bnxt *bp)
4131 atomic_set(&bp->intr_sem, 0);
4132 for (i = 0; i < bp->cp_nr_rings; i++) {
4133 struct bnxt_napi *bnapi = bp->bnapi[i];
4134 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4136 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4140 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4141 u16 cmpl_ring, u16 target_id)
4143 struct input *req = request;
4145 req->req_type = cpu_to_le16(req_type);
4146 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4147 req->target_id = cpu_to_le16(target_id);
4148 if (bnxt_kong_hwrm_message(bp, req))
4149 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4151 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
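/* Typical calling pattern, as used throughout this file (HWRM_XYZ and
 * hwrm_xyz_input are placeholders, not a real command):
 *
 *	struct hwrm_xyz_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_XYZ, -1, -1);
 *	... fill in request-specific fields ...
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *
 * A cmpl_ring of -1 (INVALID_HW_RING_ID) means the response is polled
 * rather than signalled on a completion ring; a target_id of -1 is the
 * default (own function) target.
 */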
4154 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4157 case HWRM_ERR_CODE_SUCCESS:
4158 return 0;
4159 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4160 return -EACCES;
4161 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4162 return -ENOSPC;
4163 case HWRM_ERR_CODE_INVALID_PARAMS:
4164 case HWRM_ERR_CODE_INVALID_FLAGS:
4165 case HWRM_ERR_CODE_INVALID_ENABLES:
4166 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4167 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4168 return -EINVAL;
4169 case HWRM_ERR_CODE_NO_BUFFER:
4170 return -ENOMEM;
4171 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4172 return -EAGAIN;
4173 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4174 return -EOPNOTSUPP;
4175 default:
4176 return -EIO;
4177 }
4180 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4181 int timeout, bool silent)
4183 int i, intr_process, rc, tmo_count;
4184 struct input *req = msg;
4188 u16 cp_ring_id, len = 0;
4189 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4190 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4191 struct hwrm_short_input short_input = {0};
4192 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4193 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
4194 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4195 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4197 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4200 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4201 if (msg_len > bp->hwrm_max_ext_req_len ||
4202 !bp->hwrm_short_cmd_req_addr)
4206 if (bnxt_hwrm_kong_chnl(bp, req)) {
4207 dst = BNXT_HWRM_CHNL_KONG;
4208 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4209 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4210 resp = bp->hwrm_cmd_kong_resp_addr;
4211 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4214 memset(resp, 0, PAGE_SIZE);
4215 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4216 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4218 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4219 /* currently supports only one outstanding message */
4220 if (intr_process)
4221 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4223 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4224 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4225 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4228 /* Set boundary for maximum extended request length for short
4229 * cmd format. If passed up from device use the max supported
4230 * internal req length.
4231 */
4232 max_msg_len = bp->hwrm_max_ext_req_len;
4234 memcpy(short_cmd_req, req, msg_len);
4235 if (msg_len < max_msg_len)
4236 memset(short_cmd_req + msg_len, 0,
4237 max_msg_len - msg_len);
4239 short_input.req_type = req->req_type;
4240 short_input.signature =
4241 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4242 short_input.size = cpu_to_le16(msg_len);
4243 short_input.req_addr =
4244 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4246 data = (u32 *)&short_input;
4247 msg_len = sizeof(short_input);
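/* In short-command mode only the small hwrm_short_input descriptor
 * (which points at the DMA-mapped full request) is written to the
 * communication channel below, instead of the full request.
 */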
4249 /* Sync memory write before updating doorbell */
4250 wmb();
4252 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4255 /* Write request msg to hwrm channel */
4256 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4258 for (i = msg_len; i < max_req_len; i += 4)
4259 writel(0, bp->bar0 + bar_offset + i);
4261 /* Ring channel doorbell */
4262 writel(1, bp->bar0 + doorbell_offset);
4264 if (!pci_is_enabled(bp->pdev))
4268 timeout = DFLT_HWRM_CMD_TIMEOUT;
4269 /* convert timeout to usec */
4270 timeout *= 1000;
4273 /* Short timeout for the first few iterations:
4274 * number of loops = number of loops for short timeout +
4275 * number of loops for standard timeout.
4276 */
4277 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4278 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4279 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
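/* i.e. the polling budget is split in two: the first
 * HWRM_SHORT_TIMEOUT_COUNTER iterations sleep in the short usleep
 * range, and the remaining timeout (already converted to usec) is
 * divided into HWRM_MIN_TIMEOUT-sized steps, so tmo_count bounds the
 * total wait at roughly the requested timeout.
 */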
4280 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4283 u16 seq_id = bp->hwrm_intr_seq_id;
4285 /* Wait until hwrm response cmpl interrupt is processed */
4286 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4288 /* Abort the wait for completion if the FW health
4289 * check has failed.
4290 */
4291 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4293 /* on first few passes, just barely sleep */
4294 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4295 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4296 HWRM_SHORT_MAX_TIMEOUT);
4298 usleep_range(HWRM_MIN_TIMEOUT,
4302 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4304 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4305 le16_to_cpu(req->req_type));
4308 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4310 valid = resp_addr + len - 1;
4314 /* Check if response len is updated */
4315 for (i = 0; i < tmo_count; i++) {
4316 /* Abort the wait for completion if the FW health
4317 * check has failed.
4318 */
4319 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4321 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4325 /* on first few passes, just barely sleep */
4326 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4327 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4328 HWRM_SHORT_MAX_TIMEOUT);
4330 usleep_range(HWRM_MIN_TIMEOUT,
4334 if (i >= tmo_count) {
4336 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4337 HWRM_TOTAL_TIMEOUT(i),
4338 le16_to_cpu(req->req_type),
4339 le16_to_cpu(req->seq_id), len);
4343 /* Last byte of resp contains valid bit */
4344 valid = resp_addr + len - 1;
4345 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4346 /* make sure we read from updated DMA memory */
4347 dma_rmb();
4348 if (*valid)
4349 break;
4353 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4355 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4356 HWRM_TOTAL_TIMEOUT(i),
4357 le16_to_cpu(req->req_type),
4358 le16_to_cpu(req->seq_id), len,
4364 /* Zero valid bit for compatibility. Valid bit in an older spec
4365 * may become a new field in a newer spec. We must make sure that
4366 * a new field not implemented by old spec will read zero.
4367 */
4368 *valid = 0;
4369 rc = le16_to_cpu(resp->error_code);
4371 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4372 le16_to_cpu(resp->req_type),
4373 le16_to_cpu(resp->seq_id), rc);
4374 return bnxt_hwrm_to_stderr(rc);
4377 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4379 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4382 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4385 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4388 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4392 mutex_lock(&bp->hwrm_cmd_lock);
4393 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4394 mutex_unlock(&bp->hwrm_cmd_lock);
4398 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4403 mutex_lock(&bp->hwrm_cmd_lock);
4404 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4405 mutex_unlock(&bp->hwrm_cmd_lock);
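/* Callers that need to read the shared DMA response buffer take
 * bp->hwrm_cmd_lock themselves, send with _hwrm_send_message(), copy
 * the fields they need out of the response, and only then drop the
 * lock -- see e.g. bnxt_hwrm_tunnel_dst_port_alloc() below.
 */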
4409 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4412 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4413 struct hwrm_func_drv_rgtr_input req = {0};
4414 DECLARE_BITMAP(async_events_bmap, 256);
4415 u32 *events = (u32 *)async_events_bmap;
4419 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4421 req.enables =
4422 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4423 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4424 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4426 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4427 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4428 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4429 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4430 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4431 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4432 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4433 req.flags = cpu_to_le32(flags);
4434 req.ver_maj_8b = DRV_VER_MAJ;
4435 req.ver_min_8b = DRV_VER_MIN;
4436 req.ver_upd_8b = DRV_VER_UPD;
4437 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4438 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4439 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4445 memset(data, 0, sizeof(data));
4446 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4447 u16 cmd = bnxt_vf_req_snif[i];
4448 unsigned int bit, idx;
4450 idx = cmd / 32;
4451 bit = cmd % 32;
4452 data[idx] |= 1 << bit;
4455 for (i = 0; i < 8; i++)
4456 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4458 req.enables |=
4459 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4462 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4463 req.flags |= cpu_to_le32(
4464 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4466 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4467 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4468 u16 event_id = bnxt_async_events_arr[i];
4470 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4471 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4473 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4475 if (bmap && bmap_size) {
4476 for (i = 0; i < bmap_size; i++) {
4477 if (test_bit(i, bmap))
4478 __set_bit(i, async_events_bmap);
4481 for (i = 0; i < 8; i++)
4482 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
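/* async_events_bmap is a 256-bit bitmap indexed by async event id; the
 * eight 32-bit words copied into req.async_event_fwd[] above tell the
 * firmware exactly which async events to forward to this function.
 */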
4485 req.enables |=
4486 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4488 mutex_lock(&bp->hwrm_cmd_lock);
4489 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4490 if (!rc) {
4491 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4492 if (resp->flags &
4493 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4494 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4496 mutex_unlock(&bp->hwrm_cmd_lock);
4500 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4502 struct hwrm_func_drv_unrgtr_input req = {0};
4504 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4507 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4508 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4511 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4514 struct hwrm_tunnel_dst_port_free_input req = {0};
4516 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4517 req.tunnel_type = tunnel_type;
4519 switch (tunnel_type) {
4520 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4521 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4522 break;
4523 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4524 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4525 break;
4530 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4532 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4537 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4541 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4542 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4544 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4546 req.tunnel_type = tunnel_type;
4547 req.tunnel_dst_port_val = port;
4549 mutex_lock(&bp->hwrm_cmd_lock);
4550 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4552 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4557 switch (tunnel_type) {
4558 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4559 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4560 break;
4561 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4562 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4563 break;
4569 mutex_unlock(&bp->hwrm_cmd_lock);
4573 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4575 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4576 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4578 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4579 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4581 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4582 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4583 req.mask = cpu_to_le32(vnic->rx_mask);
4584 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4587 #ifdef CONFIG_RFS_ACCEL
4588 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4589 struct bnxt_ntuple_filter *fltr)
4591 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4593 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4594 req.ntuple_filter_id = fltr->filter_id;
4595 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4598 #define BNXT_NTP_FLTR_FLAGS \
4599 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4600 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4601 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4602 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4603 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4604 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4605 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4606 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4607 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4608 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4609 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4610 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4611 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4612 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4614 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4615 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4617 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4618 struct bnxt_ntuple_filter *fltr)
4620 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4621 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4622 struct flow_keys *keys = &fltr->fkeys;
4623 struct bnxt_vnic_info *vnic;
4627 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4628 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4630 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4631 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4632 req.dst_id = cpu_to_le16(fltr->rxq);
4634 vnic = &bp->vnic_info[fltr->rxq + 1];
4635 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4637 req.flags = cpu_to_le32(flags);
4638 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4640 req.ethertype = htons(ETH_P_IP);
4641 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4642 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4643 req.ip_protocol = keys->basic.ip_proto;
4645 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4648 req.ethertype = htons(ETH_P_IPV6);
4649 req.ip_addr_type =
4650 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4651 *(struct in6_addr *)&req.src_ipaddr[0] =
4652 keys->addrs.v6addrs.src;
4653 *(struct in6_addr *)&req.dst_ipaddr[0] =
4654 keys->addrs.v6addrs.dst;
4655 for (i = 0; i < 4; i++) {
4656 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4657 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4660 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4661 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4662 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4663 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4665 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4666 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4667 req.tunnel_type =
4668 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4671 req.src_port = keys->ports.src;
4672 req.src_port_mask = cpu_to_be16(0xffff);
4673 req.dst_port = keys->ports.dst;
4674 req.dst_port_mask = cpu_to_be16(0xffff);
4676 mutex_lock(&bp->hwrm_cmd_lock);
4677 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4678 if (!rc) {
4679 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4680 fltr->filter_id = resp->ntuple_filter_id;
4681 }
4682 mutex_unlock(&bp->hwrm_cmd_lock);
4687 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4688 u8 *mac_addr)
4689 {
4690 int rc = 0;
4691 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4692 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4695 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4696 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4697 req.flags |=
4698 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4699 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4700 req.enables =
4701 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4702 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4703 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4704 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4705 req.l2_addr_mask[0] = 0xff;
4706 req.l2_addr_mask[1] = 0xff;
4707 req.l2_addr_mask[2] = 0xff;
4708 req.l2_addr_mask[3] = 0xff;
4709 req.l2_addr_mask[4] = 0xff;
4710 req.l2_addr_mask[5] = 0xff;
4712 mutex_lock(&bp->hwrm_cmd_lock);
4713 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4714 if (!rc)
4715 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4716 resp->l2_filter_id;
4717 mutex_unlock(&bp->hwrm_cmd_lock);
4718 return rc;
4719 }
4721 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4722 {
4723 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4724 int rc = 0;
4726 /* Any associated ntuple filters will also be cleared by firmware. */
4727 mutex_lock(&bp->hwrm_cmd_lock);
4728 for (i = 0; i < num_of_vnics; i++) {
4729 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4731 for (j = 0; j < vnic->uc_filter_count; j++) {
4732 struct hwrm_cfa_l2_filter_free_input req = {0};
4734 bnxt_hwrm_cmd_hdr_init(bp, &req,
4735 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4737 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4739 rc = _hwrm_send_message(bp, &req, sizeof(req),
4740 HWRM_CMD_TIMEOUT);
4741 }
4742 vnic->uc_filter_count = 0;
4743 }
4744 mutex_unlock(&bp->hwrm_cmd_lock);
4746 return rc;
4747 }
4749 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4750 {
4751 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4752 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4753 struct hwrm_vnic_tpa_cfg_input req = {0};
4755 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4756 return 0;
4758 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4760 if (tpa_flags) {
4761 u16 mss = bp->dev->mtu - 40;
4762 u32 nsegs, n, segs = 0, flags;
4764 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4765 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4766 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4767 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4768 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4769 if (tpa_flags & BNXT_FLAG_GRO)
4770 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4772 req.flags = cpu_to_le32(flags);
4774 req.enables =
4775 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4776 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4777 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4779 /* The number of segments is in log2 units, and the first
4780 * packet is not included in the count.
4781 */
4782 if (mss <= BNXT_RX_PAGE_SIZE) {
4783 n = BNXT_RX_PAGE_SIZE / mss;
4784 nsegs = (MAX_SKB_FRAGS - 1) * n;
4785 } else {
4786 n = mss / BNXT_RX_PAGE_SIZE;
4787 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4788 n++;
4789 nsegs = (MAX_SKB_FRAGS - n) / n;
4790 }
4792 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4793 segs = MAX_TPA_SEGS_P5;
4794 max_aggs = bp->max_tpa;
4795 } else {
4796 segs = ilog2(nsegs);
4797 }
4798 req.max_agg_segs = cpu_to_le16(segs);
4799 req.max_aggs = cpu_to_le16(max_aggs);
4801 req.min_agg_len = cpu_to_le32(512);
4802 }
4803 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4805 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4806 }
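/* Editor's note (illustrative, not part of the original source): with a
 * 1500-byte MTU, mss = 1500 - 40 = 1460. Assuming BNXT_RX_PAGE_SIZE is
 * 4096 and MAX_SKB_FRAGS is 17, mss fits in one page, so n = 4096 / 1460
 * = 2 and nsegs = (17 - 1) * 2 = 32; ilog2(32) = 5 is then programmed as
 * max_agg_segs on pre-P5 chips.
 */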
4808 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4809 {
4810 struct bnxt_ring_grp_info *grp_info;
4812 grp_info = &bp->grp_info[ring->grp_idx];
4813 return grp_info->cp_fw_ring_id;
4814 }
4816 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4817 {
4818 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4819 struct bnxt_napi *bnapi = rxr->bnapi;
4820 struct bnxt_cp_ring_info *cpr;
4822 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4823 return cpr->cp_ring_struct.fw_ring_id;
4824 }
4825 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4826 }
4829 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4830 {
4831 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4832 struct bnxt_napi *bnapi = txr->bnapi;
4833 struct bnxt_cp_ring_info *cpr;
4835 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4836 return cpr->cp_ring_struct.fw_ring_id;
4837 }
4838 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4839 }
4842 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4843 {
4844 u32 i, j, max_rings;
4845 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4846 struct hwrm_vnic_rss_cfg_input req = {0};
4848 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4849 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4850 return 0;
4852 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4853 if (set_rss) {
4854 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4855 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4856 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4857 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4858 max_rings = bp->rx_nr_rings - 1;
4859 else
4860 max_rings = bp->rx_nr_rings;
4861 } else {
4862 max_rings = 1;
4863 }
4865 /* Fill the RSS indirection table with ring group ids */
4866 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4867 if (j == max_rings)
4868 j = 0;
4869 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4870 }
4872 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4873 req.hash_key_tbl_addr =
4874 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4875 }
4876 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4877 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4878 }
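/* Editor's note (illustrative, not part of the original source): with
 * HW_HASH_INDEX_SIZE of 128 and, say, 4 RX rings, the loop above fills
 * the indirection table with the ring group ids of rings 0,1,2,3
 * repeating, and the RSS hash selected by bp->rss_hash_cfg indexes into
 * that table to pick the destination ring.
 */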
4880 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4881 {
4882 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4883 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4884 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4885 struct hwrm_vnic_rss_cfg_input req = {0};
4887 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4888 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4889 if (!set_rss) {
4890 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4891 return 0;
4892 }
4893 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4894 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4895 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4896 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4897 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4898 for (i = 0, k = 0; i < nr_ctxs; i++) {
4899 __le16 *ring_tbl = vnic->rss_table;
4900 int rc;
4902 req.ring_table_pair_index = i;
4903 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4904 for (j = 0; j < 64; j++) {
4905 u16 ring_id;
4907 ring_id = rxr->rx_ring_struct.fw_ring_id;
4908 *ring_tbl++ = cpu_to_le16(ring_id);
4909 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4910 *ring_tbl++ = cpu_to_le16(ring_id);
4911 rxr++;
4912 k++;
4913 if (k == max_rings) {
4914 k = 0;
4915 rxr = &bp->rx_ring[0];
4916 }
4917 }
4918 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4919 if (rc)
4920 return rc;
4921 }
4923 return 0;
4924 }
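/* Editor's note (illustrative, not part of the original source): on P5
 * chips each RSS context holds 64 (rx ring, completion ring) id pairs,
 * which is why nr_ctxs is DIV_ROUND_UP(bp->rx_nr_rings, 64); a device
 * with 128 RX rings would program two contexts in this loop.
 */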
4925 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4926 {
4927 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4928 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4930 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4931 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4932 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4933 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4934 req.enables =
4935 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4936 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4937 /* thresholds not implemented in firmware yet */
4938 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4939 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4940 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4941 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4942 }
4944 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4945 u16 ctx_idx)
4946 {
4947 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4949 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4950 req.rss_cos_lb_ctx_id =
4951 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4953 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4954 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4955 }
4957 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4958 {
4959 int i, j;
4961 for (i = 0; i < bp->nr_vnics; i++) {
4962 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4964 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4965 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4966 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4967 }
4968 }
4969 bp->rsscos_nr_ctxs = 0;
4970 }
4972 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4973 {
4974 int rc;
4975 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4976 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4977 bp->hwrm_cmd_resp_addr;
4979 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4980 -1);
4982 mutex_lock(&bp->hwrm_cmd_lock);
4983 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4984 if (!rc)
4985 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4986 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4987 mutex_unlock(&bp->hwrm_cmd_lock);
4989 return rc;
4990 }
4992 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4993 {
4994 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4995 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4996 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4997 }
4999 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5000 {
5001 unsigned int ring = 0, grp_idx;
5002 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5003 struct hwrm_vnic_cfg_input req = {0};
5004 u16 def_vlan = 0;
5006 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5008 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5009 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5011 req.default_rx_ring_id =
5012 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5013 req.default_cmpl_ring_id =
5014 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5015 req.enables =
5016 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5017 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5018 goto vnic_mru;
5019 }
5020 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5021 /* Only RSS support for now TBD: COS & LB */
5022 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5023 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5024 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5025 VNIC_CFG_REQ_ENABLES_MRU);
5026 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5027 req.rss_rule =
5028 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5029 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5030 VNIC_CFG_REQ_ENABLES_MRU);
5031 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5032 } else {
5033 req.rss_rule = cpu_to_le16(0xffff);
5034 }
5036 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5037 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5038 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5039 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5040 } else {
5041 req.cos_rule = cpu_to_le16(0xffff);
5042 }
5044 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5045 ring = 0;
5046 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5047 ring = vnic_id - 1;
5048 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5049 ring = bp->rx_nr_rings - 1;
5051 grp_idx = bp->rx_ring[ring].bnapi->index;
5052 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5053 req.lb_rule = cpu_to_le16(0xffff);
5054 vnic_mru:
5055 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5056 VLAN_HLEN);
5058 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5059 #ifdef CONFIG_BNXT_SRIOV
5060 if (BNXT_VF(bp))
5061 def_vlan = bp->vf.vlan;
5062 #endif
5063 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5064 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5065 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5066 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5068 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5069 }
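/* Editor's note (illustrative, not part of the original source): for the
 * default 1500-byte MTU, the MRU programmed above is 1500 + 14 (ETH_HLEN)
 * + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */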
5071 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5072 {
5073 u32 rc = 0;
5075 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5076 struct hwrm_vnic_free_input req = {0};
5078 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5079 req.vnic_id =
5080 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5082 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5083 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5084 }
5085 return rc;
5086 }
5088 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5089 {
5090 u16 i;
5092 for (i = 0; i < bp->nr_vnics; i++)
5093 bnxt_hwrm_vnic_free_one(bp, i);
5094 }
5096 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5097 unsigned int start_rx_ring_idx,
5098 unsigned int nr_rings)
5099 {
5100 int rc = 0;
5101 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5102 struct hwrm_vnic_alloc_input req = {0};
5103 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5104 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5106 if (bp->flags & BNXT_FLAG_CHIP_P5)
5107 goto vnic_no_ring_grps;
5109 /* map ring groups to this vnic */
5110 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5111 grp_idx = bp->rx_ring[i].bnapi->index;
5112 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5113 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5114 j, nr_rings);
5115 break;
5116 }
5117 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5118 }
5120 vnic_no_ring_grps:
5121 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5122 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5123 if (vnic_id == 0)
5124 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5126 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5128 mutex_lock(&bp->hwrm_cmd_lock);
5129 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5130 if (!rc)
5131 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5132 mutex_unlock(&bp->hwrm_cmd_lock);
5133 return rc;
5134 }
5136 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5137 {
5138 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5139 struct hwrm_vnic_qcaps_input req = {0};
5140 int rc = 0;
5142 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5143 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5144 if (bp->hwrm_spec_code < 0x10600)
5145 return 0;
5147 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5148 mutex_lock(&bp->hwrm_cmd_lock);
5149 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5150 if (!rc) {
5151 u32 flags = le32_to_cpu(resp->flags);
5153 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5154 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5155 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5156 if (flags &
5157 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5158 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5159 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5160 if (bp->max_tpa_v2)
5161 bp->hw_ring_stats_size =
5162 sizeof(struct ctx_hw_stats_ext);
5163 }
5164 mutex_unlock(&bp->hwrm_cmd_lock);
5165 return rc;
5166 }
5168 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5169 {
5170 u16 i;
5171 u32 rc = 0;
5173 if (bp->flags & BNXT_FLAG_CHIP_P5)
5174 return 0;
5176 mutex_lock(&bp->hwrm_cmd_lock);
5177 for (i = 0; i < bp->rx_nr_rings; i++) {
5178 struct hwrm_ring_grp_alloc_input req = {0};
5179 struct hwrm_ring_grp_alloc_output *resp =
5180 bp->hwrm_cmd_resp_addr;
5181 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5183 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5185 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5186 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5187 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5188 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5190 rc = _hwrm_send_message(bp, &req, sizeof(req),
5191 HWRM_CMD_TIMEOUT);
5192 if (rc)
5193 break;
5195 bp->grp_info[grp_idx].fw_grp_id =
5196 le32_to_cpu(resp->ring_group_id);
5197 }
5198 mutex_unlock(&bp->hwrm_cmd_lock);
5199 return rc;
5200 }
5202 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5203 {
5204 u16 i;
5205 u32 rc = 0;
5206 struct hwrm_ring_grp_free_input req = {0};
5208 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5209 return 0;
5211 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5213 mutex_lock(&bp->hwrm_cmd_lock);
5214 for (i = 0; i < bp->cp_nr_rings; i++) {
5215 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5216 continue;
5217 req.ring_group_id =
5218 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5220 rc = _hwrm_send_message(bp, &req, sizeof(req),
5221 HWRM_CMD_TIMEOUT);
5222 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5223 }
5224 mutex_unlock(&bp->hwrm_cmd_lock);
5225 return rc;
5226 }
5228 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5229 struct bnxt_ring_struct *ring,
5230 u32 ring_type, u32 map_index)
5231 {
5232 int rc = 0, err = 0;
5233 struct hwrm_ring_alloc_input req = {0};
5234 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5235 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5236 struct bnxt_ring_grp_info *grp_info;
5237 u16 ring_id;
5239 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5241 req.enables = 0;
5242 if (rmem->nr_pages > 1) {
5243 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5244 /* Page size is in log2 units */
5245 req.page_size = BNXT_PAGE_SHIFT;
5246 req.page_tbl_depth = 1;
5247 } else {
5248 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5249 }
5251 /* Association of ring index with doorbell index and MSIX number */
5252 req.logical_id = cpu_to_le16(map_index);
5254 switch (ring_type) {
5255 case HWRM_RING_ALLOC_TX: {
5256 struct bnxt_tx_ring_info *txr;
5258 txr = container_of(ring, struct bnxt_tx_ring_info,
5259 tx_ring_struct);
5260 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5261 /* Association of transmit ring with completion ring */
5262 grp_info = &bp->grp_info[ring->grp_idx];
5263 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5264 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5265 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5266 req.queue_id = cpu_to_le16(ring->queue_id);
5267 break;
5268 }
5269 case HWRM_RING_ALLOC_RX:
5270 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5271 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5272 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5273 u16 flags = 0;
5275 /* Association of rx ring with stats context */
5276 grp_info = &bp->grp_info[ring->grp_idx];
5277 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5278 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5279 req.enables |= cpu_to_le32(
5280 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5281 if (NET_IP_ALIGN == 2)
5282 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5283 req.flags = cpu_to_le16(flags);
5284 }
5285 break;
5286 case HWRM_RING_ALLOC_AGG:
5287 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5288 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5289 /* Association of agg ring with rx ring */
5290 grp_info = &bp->grp_info[ring->grp_idx];
5291 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5292 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5293 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5294 req.enables |= cpu_to_le32(
5295 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5296 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5297 } else {
5298 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5299 }
5300 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5301 break;
5302 case HWRM_RING_ALLOC_CMPL:
5303 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5304 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5305 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5306 /* Association of cp ring with nq */
5307 grp_info = &bp->grp_info[map_index];
5308 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5309 req.cq_handle = cpu_to_le64(ring->handle);
5310 req.enables |= cpu_to_le32(
5311 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5312 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5313 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5314 }
5315 break;
5316 case HWRM_RING_ALLOC_NQ:
5317 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5318 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5319 if (bp->flags & BNXT_FLAG_USING_MSIX)
5320 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5321 break;
5322 default:
5323 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5324 ring_type);
5325 return -1;
5326 }
5328 mutex_lock(&bp->hwrm_cmd_lock);
5329 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5330 err = le16_to_cpu(resp->error_code);
5331 ring_id = le16_to_cpu(resp->ring_id);
5332 mutex_unlock(&bp->hwrm_cmd_lock);
5334 if (rc || err) {
5335 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5336 ring_type, rc, err);
5337 return -EIO;
5338 }
5339 ring->fw_ring_id = ring_id;
5340 return rc;
5341 }
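/* Editor's note (illustrative, not part of the original source): every
 * ring type is created through this single HWRM_RING_ALLOC request; the
 * firmware returns a 16-bit ring id that bnxt_set_db() below folds into
 * the doorbell key for that ring.
 */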
5343 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5344 {
5345 int rc;
5347 if (BNXT_PF(bp)) {
5348 struct hwrm_func_cfg_input req = {0};
5350 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5351 req.fid = cpu_to_le16(0xffff);
5352 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5353 req.async_event_cr = cpu_to_le16(idx);
5354 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5355 } else {
5356 struct hwrm_func_vf_cfg_input req = {0};
5358 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5359 req.enables =
5360 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5361 req.async_event_cr = cpu_to_le16(idx);
5362 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5363 }
5364 return rc;
5365 }
5367 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5368 u32 map_idx, u32 xid)
5369 {
5370 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5371 if (BNXT_PF(bp))
5372 db->doorbell = bp->bar1 + 0x10000;
5373 else
5374 db->doorbell = bp->bar1 + 0x4000;
5375 switch (ring_type) {
5376 case HWRM_RING_ALLOC_TX:
5377 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5378 break;
5379 case HWRM_RING_ALLOC_RX:
5380 case HWRM_RING_ALLOC_AGG:
5381 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5382 break;
5383 case HWRM_RING_ALLOC_CMPL:
5384 db->db_key64 = DBR_PATH_L2;
5385 break;
5386 case HWRM_RING_ALLOC_NQ:
5387 db->db_key64 = DBR_PATH_L2;
5388 break;
5389 }
5390 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5391 } else {
5392 db->doorbell = bp->bar1 + map_idx * 0x80;
5393 switch (ring_type) {
5394 case HWRM_RING_ALLOC_TX:
5395 db->db_key32 = DB_KEY_TX;
5396 break;
5397 case HWRM_RING_ALLOC_RX:
5398 case HWRM_RING_ALLOC_AGG:
5399 db->db_key32 = DB_KEY_RX;
5400 break;
5401 case HWRM_RING_ALLOC_CMPL:
5402 db->db_key32 = DB_KEY_CP;
5403 break;
5404 }
5405 }
5406 }
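/* Editor's note (illustrative, not part of the original source): on P5
 * chips the doorbell is a 64-bit key with the firmware ring id (xid)
 * shifted into the DBR_XID_SFT field at a fixed BAR offset per function;
 * older chips instead use a 32-bit key per ring type at a per-ring BAR
 * offset of map_idx * 0x80.
 */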
5408 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5409 {
5410 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5411 int i, rc = 0;
5412 u32 type;
5414 if (bp->flags & BNXT_FLAG_CHIP_P5)
5415 type = HWRM_RING_ALLOC_NQ;
5416 else
5417 type = HWRM_RING_ALLOC_CMPL;
5418 for (i = 0; i < bp->cp_nr_rings; i++) {
5419 struct bnxt_napi *bnapi = bp->bnapi[i];
5420 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5421 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5422 u32 map_idx = ring->map_idx;
5423 unsigned int vector;
5425 vector = bp->irq_tbl[map_idx].vector;
5426 disable_irq_nosync(vector);
5427 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5428 if (rc) {
5429 enable_irq(vector);
5430 goto err_out;
5431 }
5432 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5433 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5434 enable_irq(vector);
5435 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5437 if (!i) {
5438 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5439 if (rc)
5440 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5441 }
5442 }
5444 type = HWRM_RING_ALLOC_TX;
5445 for (i = 0; i < bp->tx_nr_rings; i++) {
5446 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5447 struct bnxt_ring_struct *ring;
5448 u32 map_idx;
5450 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5451 struct bnxt_napi *bnapi = txr->bnapi;
5452 struct bnxt_cp_ring_info *cpr, *cpr2;
5453 u32 type2 = HWRM_RING_ALLOC_CMPL;
5455 cpr = &bnapi->cp_ring;
5456 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5457 ring = &cpr2->cp_ring_struct;
5458 ring->handle = BNXT_TX_HDL;
5459 map_idx = bnapi->index;
5460 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5461 if (rc)
5462 goto err_out;
5463 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5464 ring->fw_ring_id);
5465 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5466 }
5467 ring = &txr->tx_ring_struct;
5468 map_idx = i;
5469 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5470 if (rc)
5471 goto err_out;
5472 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5473 }
5475 type = HWRM_RING_ALLOC_RX;
5476 for (i = 0; i < bp->rx_nr_rings; i++) {
5477 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5478 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5479 struct bnxt_napi *bnapi = rxr->bnapi;
5480 u32 map_idx = bnapi->index;
5482 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5483 if (rc)
5484 goto err_out;
5485 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5486 /* If we have agg rings, post agg buffers first. */
5487 if (!agg_rings)
5488 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5489 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5490 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5491 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5492 u32 type2 = HWRM_RING_ALLOC_CMPL;
5493 struct bnxt_cp_ring_info *cpr2;
5495 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5496 ring = &cpr2->cp_ring_struct;
5497 ring->handle = BNXT_RX_HDL;
5498 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5499 if (rc)
5500 goto err_out;
5501 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5502 ring->fw_ring_id);
5503 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5504 }
5505 }
5507 if (agg_rings) {
5508 type = HWRM_RING_ALLOC_AGG;
5509 for (i = 0; i < bp->rx_nr_rings; i++) {
5510 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5511 struct bnxt_ring_struct *ring =
5512 &rxr->rx_agg_ring_struct;
5513 u32 grp_idx = ring->grp_idx;
5514 u32 map_idx = grp_idx + bp->rx_nr_rings;
5516 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5517 if (rc)
5518 goto err_out;
5520 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5521 ring->fw_ring_id);
5522 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5523 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5524 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5525 }
5526 }
5527 err_out:
5528 return rc;
5529 }
5531 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5532 struct bnxt_ring_struct *ring,
5533 u32 ring_type, int cmpl_ring_id)
5534 {
5535 int rc;
5536 struct hwrm_ring_free_input req = {0};
5537 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5538 u16 error_code;
5540 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5541 return 0;
5543 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5544 req.ring_type = ring_type;
5545 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5547 mutex_lock(&bp->hwrm_cmd_lock);
5548 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5549 error_code = le16_to_cpu(resp->error_code);
5550 mutex_unlock(&bp->hwrm_cmd_lock);
5552 if (rc || error_code) {
5553 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5554 ring_type, rc, error_code);
5555 return -EIO;
5556 }
5557 return rc;
5558 }
5560 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5561 {
5562 u32 type;
5563 int i;
5565 if (!bp->bnapi)
5566 return;
5568 for (i = 0; i < bp->tx_nr_rings; i++) {
5569 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5570 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5572 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5573 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5575 hwrm_ring_free_send_msg(bp, ring,
5576 RING_FREE_REQ_RING_TYPE_TX,
5577 close_path ? cmpl_ring_id :
5578 INVALID_HW_RING_ID);
5579 ring->fw_ring_id = INVALID_HW_RING_ID;
5580 }
5581 }
5583 for (i = 0; i < bp->rx_nr_rings; i++) {
5584 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5585 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5586 u32 grp_idx = rxr->bnapi->index;
5588 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5589 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5591 hwrm_ring_free_send_msg(bp, ring,
5592 RING_FREE_REQ_RING_TYPE_RX,
5593 close_path ? cmpl_ring_id :
5594 INVALID_HW_RING_ID);
5595 ring->fw_ring_id = INVALID_HW_RING_ID;
5596 bp->grp_info[grp_idx].rx_fw_ring_id =
5597 INVALID_HW_RING_ID;
5598 }
5599 }
5601 if (bp->flags & BNXT_FLAG_CHIP_P5)
5602 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5603 else
5604 type = RING_FREE_REQ_RING_TYPE_RX;
5605 for (i = 0; i < bp->rx_nr_rings; i++) {
5606 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5607 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5608 u32 grp_idx = rxr->bnapi->index;
5610 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5611 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5613 hwrm_ring_free_send_msg(bp, ring, type,
5614 close_path ? cmpl_ring_id :
5615 INVALID_HW_RING_ID);
5616 ring->fw_ring_id = INVALID_HW_RING_ID;
5617 bp->grp_info[grp_idx].agg_fw_ring_id =
5618 INVALID_HW_RING_ID;
5619 }
5620 }
5622 /* The completion rings are about to be freed. After that the
5623 * IRQ doorbell will not work anymore. So we need to disable
5624 * the IRQ here.
5625 */
5626 bnxt_disable_int_sync(bp);
5628 if (bp->flags & BNXT_FLAG_CHIP_P5)
5629 type = RING_FREE_REQ_RING_TYPE_NQ;
5630 else
5631 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5632 for (i = 0; i < bp->cp_nr_rings; i++) {
5633 struct bnxt_napi *bnapi = bp->bnapi[i];
5634 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5635 struct bnxt_ring_struct *ring;
5636 int j;
5638 for (j = 0; j < 2; j++) {
5639 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5641 if (cpr2) {
5642 ring = &cpr2->cp_ring_struct;
5643 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5644 continue;
5645 hwrm_ring_free_send_msg(bp, ring,
5646 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5647 INVALID_HW_RING_ID);
5648 ring->fw_ring_id = INVALID_HW_RING_ID;
5649 }
5650 }
5651 ring = &cpr->cp_ring_struct;
5652 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5653 hwrm_ring_free_send_msg(bp, ring, type,
5654 INVALID_HW_RING_ID);
5655 ring->fw_ring_id = INVALID_HW_RING_ID;
5656 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5657 }
5658 }
5659 }
5661 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5662 bool shared);
5664 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5665 {
5666 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5667 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5668 struct hwrm_func_qcfg_input req = {0};
5669 int rc;
5671 if (bp->hwrm_spec_code < 0x10601)
5672 return 0;
5674 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5675 req.fid = cpu_to_le16(0xffff);
5676 mutex_lock(&bp->hwrm_cmd_lock);
5677 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5678 if (rc) {
5679 mutex_unlock(&bp->hwrm_cmd_lock);
5680 return rc;
5681 }
5683 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5684 if (BNXT_NEW_RM(bp)) {
5685 u16 cp, stats;
5687 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5688 hw_resc->resv_hw_ring_grps =
5689 le32_to_cpu(resp->alloc_hw_ring_grps);
5690 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5691 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5692 stats = le16_to_cpu(resp->alloc_stat_ctx);
5693 hw_resc->resv_irqs = cp;
5694 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5695 int rx = hw_resc->resv_rx_rings;
5696 int tx = hw_resc->resv_tx_rings;
5698 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5699 rx >>= 1;
5700 if (cp < (rx + tx)) {
5701 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5702 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5703 rx <<= 1;
5704 hw_resc->resv_rx_rings = rx;
5705 hw_resc->resv_tx_rings = tx;
5706 }
5707 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5708 hw_resc->resv_hw_ring_grps = rx;
5709 }
5710 hw_resc->resv_cp_rings = cp;
5711 hw_resc->resv_stat_ctxs = stats;
5712 }
5713 mutex_unlock(&bp->hwrm_cmd_lock);
5714 return 0;
5715 }
5717 /* Caller must hold bp->hwrm_cmd_lock */
5718 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5719 {
5720 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5721 struct hwrm_func_qcfg_input req = {0};
5722 int rc;
5724 if (bp->hwrm_spec_code < 0x10601)
5725 return 0;
5727 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5728 req.fid = cpu_to_le16(fid);
5729 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5730 if (!rc)
5731 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5733 return rc;
5734 }
5736 static bool bnxt_rfs_supported(struct bnxt *bp);
5738 static void
5739 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5740 int tx_rings, int rx_rings, int ring_grps,
5741 int cp_rings, int stats, int vnics)
5742 {
5743 u32 enables = 0;
5745 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5746 req->fid = cpu_to_le16(0xffff);
5747 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5748 req->num_tx_rings = cpu_to_le16(tx_rings);
5749 if (BNXT_NEW_RM(bp)) {
5750 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5751 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5752 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5753 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5754 enables |= tx_rings + ring_grps ?
5755 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5756 enables |= rx_rings ?
5757 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5758 } else {
5759 enables |= cp_rings ?
5760 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5761 enables |= ring_grps ?
5762 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5763 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5764 }
5765 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5767 req->num_rx_rings = cpu_to_le16(rx_rings);
5768 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5769 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5770 req->num_msix = cpu_to_le16(cp_rings);
5771 req->num_rsscos_ctxs =
5772 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5773 } else {
5774 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5775 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5776 req->num_rsscos_ctxs = cpu_to_le16(1);
5777 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5778 bnxt_rfs_supported(bp))
5779 req->num_rsscos_ctxs =
5780 cpu_to_le16(ring_grps + 1);
5781 }
5782 req->num_stat_ctxs = cpu_to_le16(stats);
5783 req->num_vnics = cpu_to_le16(vnics);
5784 }
5785 req->enables = cpu_to_le32(enables);
5786 }
5788 static void
5789 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5790 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5791 int rx_rings, int ring_grps, int cp_rings,
5792 int stats, int vnics)
5793 {
5794 u32 enables = 0;
5796 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5797 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5798 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5799 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5800 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5801 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5802 enables |= tx_rings + ring_grps ?
5803 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5804 } else {
5805 enables |= cp_rings ?
5806 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5807 enables |= ring_grps ?
5808 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5809 }
5810 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5811 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5813 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5814 req->num_tx_rings = cpu_to_le16(tx_rings);
5815 req->num_rx_rings = cpu_to_le16(rx_rings);
5816 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5817 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5818 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5819 } else {
5820 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5821 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5822 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5823 }
5824 req->num_stat_ctxs = cpu_to_le16(stats);
5825 req->num_vnics = cpu_to_le16(vnics);
5827 req->enables = cpu_to_le32(enables);
5828 }
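/* Editor's note (illustrative, not part of the original source): in both
 * reservation helpers above, P5 chips request one completion ring per TX
 * ring plus one per RX ring group, and one RSS context per 64 ring
 * groups (DIV_ROUND_UP(ring_grps, 64)); legacy chips request a single
 * RSS context, or ring_grps + 1 when aRFS needs a context per ring.
 */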
5830 static int
5831 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5832 int ring_grps, int cp_rings, int stats, int vnics)
5833 {
5834 struct hwrm_func_cfg_input req = {0};
5835 int rc;
5837 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5838 cp_rings, stats, vnics);
5839 if (!req.enables)
5840 return 0;
5842 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5843 if (rc)
5844 return rc;
5846 if (bp->hwrm_spec_code < 0x10601)
5847 bp->hw_resc.resv_tx_rings = tx_rings;
5849 rc = bnxt_hwrm_get_rings(bp);
5850 return rc;
5851 }
5853 static int
5854 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5855 int ring_grps, int cp_rings, int stats, int vnics)
5856 {
5857 struct hwrm_func_vf_cfg_input req = {0};
5858 int rc;
5860 if (!BNXT_NEW_RM(bp)) {
5861 bp->hw_resc.resv_tx_rings = tx_rings;
5862 return 0;
5863 }
5865 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5866 cp_rings, stats, vnics);
5867 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5868 if (rc)
5869 return rc;
5871 rc = bnxt_hwrm_get_rings(bp);
5872 return rc;
5873 }
5875 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5876 int cp, int stat, int vnic)
5877 {
5878 if (BNXT_PF(bp))
5879 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5880 vnic);
5881 else
5882 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5883 vnic);
5884 }
5886 int bnxt_nq_rings_in_use(struct bnxt *bp)
5887 {
5888 int cp = bp->cp_nr_rings;
5889 int ulp_msix, ulp_base;
5891 ulp_msix = bnxt_get_ulp_msix_num(bp);
5892 if (ulp_msix) {
5893 ulp_base = bnxt_get_ulp_msix_base(bp);
5894 cp += ulp_msix;
5895 if ((ulp_base + ulp_msix) > cp)
5896 cp = ulp_base + ulp_msix;
5897 }
5898 return cp;
5899 }
5901 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5902 {
5903 int cp;
5905 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5906 return bnxt_nq_rings_in_use(bp);
5908 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5909 return cp;
5910 }
5912 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5913 {
5914 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5915 int cp = bp->cp_nr_rings;
5917 if (!ulp_stat)
5918 return cp;
5920 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5921 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5923 return cp + ulp_stat;
5924 }
5926 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5927 {
5928 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5929 int cp = bnxt_cp_rings_in_use(bp);
5930 int nq = bnxt_nq_rings_in_use(bp);
5931 int rx = bp->rx_nr_rings, stat;
5932 int vnic = 1, grp = rx;
5934 if (bp->hwrm_spec_code < 0x10601)
5935 return false;
5937 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5938 return true;
5940 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5941 vnic = rx + 1;
5942 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5943 rx <<= 1;
5944 stat = bnxt_get_func_stat_ctxs(bp);
5945 if (BNXT_NEW_RM(bp) &&
5946 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5947 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
5948 (hw_resc->resv_hw_ring_grps != grp &&
5949 !(bp->flags & BNXT_FLAG_CHIP_P5))))
5950 return true;
5951 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5952 hw_resc->resv_irqs != nq)
5953 return true;
5954 return false;
5955 }
5957 static int __bnxt_reserve_rings(struct bnxt *bp)
5958 {
5959 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5960 int cp = bnxt_nq_rings_in_use(bp);
5961 int tx = bp->tx_nr_rings;
5962 int rx = bp->rx_nr_rings;
5963 int grp, rx_rings, rc;
5964 int vnic = 1, stat;
5965 bool sh = false;
5967 if (!bnxt_need_reserve_rings(bp))
5968 return 0;
5970 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5971 sh = true;
5972 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5973 vnic = rx + 1;
5974 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5975 rx <<= 1;
5976 grp = bp->rx_nr_rings;
5977 stat = bnxt_get_func_stat_ctxs(bp);
5979 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5980 if (rc)
5981 return rc;
5983 tx = hw_resc->resv_tx_rings;
5984 if (BNXT_NEW_RM(bp)) {
5985 rx = hw_resc->resv_rx_rings;
5986 cp = hw_resc->resv_irqs;
5987 grp = hw_resc->resv_hw_ring_grps;
5988 vnic = hw_resc->resv_vnics;
5989 stat = hw_resc->resv_stat_ctxs;
5990 }
5992 rx_rings = rx;
5993 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5994 if (rx >= 2) {
5995 rx_rings = rx >> 1;
5996 } else {
5997 if (netif_running(bp->dev))
5998 return -ENOMEM;
6000 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6001 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6002 bp->dev->hw_features &= ~NETIF_F_LRO;
6003 bp->dev->features &= ~NETIF_F_LRO;
6004 bnxt_set_ring_params(bp);
6005 }
6006 }
6007 rx_rings = min_t(int, rx_rings, grp);
6008 cp = min_t(int, cp, bp->cp_nr_rings);
6009 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6010 stat -= bnxt_get_ulp_stat_ctxs(bp);
6011 cp = min_t(int, cp, stat);
6012 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6013 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6014 rx = rx_rings << 1;
6015 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6016 bp->tx_nr_rings = tx;
6017 bp->rx_nr_rings = rx_rings;
6018 bp->cp_nr_rings = cp;
6020 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6021 return -ENOMEM;
6023 return rc;
6024 }
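/* Editor's note (illustrative, not part of the original source): with
 * aggregation rings enabled each RX queue consumes two hardware rings,
 * so the logic above halves "rx" before trimming and doubles the result
 * back (rx = rx_rings << 1) once rings, groups, vnics and stat contexts
 * have been fitted to what the firmware actually granted.
 */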
6026 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6027 int ring_grps, int cp_rings, int stats,
6028 int vnics)
6029 {
6030 struct hwrm_func_vf_cfg_input req = {0};
6031 u32 flags;
6032 int rc;
6034 if (!BNXT_NEW_RM(bp))
6035 return 0;
6037 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6038 cp_rings, stats, vnics);
6039 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6040 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6041 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6042 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6043 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6044 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6045 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6046 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6048 req.flags = cpu_to_le32(flags);
6049 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6050 return rc;
6051 }
6053 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6054 int ring_grps, int cp_rings, int stats,
6055 int vnics)
6056 {
6057 struct hwrm_func_cfg_input req = {0};
6058 u32 flags;
6059 int rc;
6061 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6062 cp_rings, stats, vnics);
6063 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6064 if (BNXT_NEW_RM(bp)) {
6065 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6066 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6067 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6068 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6069 if (bp->flags & BNXT_FLAG_CHIP_P5)
6070 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6071 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6072 else
6073 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6074 }
6076 req.flags = cpu_to_le32(flags);
6077 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6078 return rc;
6079 }
6081 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6082 int ring_grps, int cp_rings, int stats,
6083 int vnics)
6084 {
6085 if (bp->hwrm_spec_code < 0x10801)
6086 return 0;
6088 if (BNXT_PF(bp))
6089 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6090 ring_grps, cp_rings, stats,
6091 vnics);
6093 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6094 cp_rings, stats, vnics);
6095 }
6097 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6098 {
6099 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6100 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6101 struct hwrm_ring_aggint_qcaps_input req = {0};
6102 int rc;
6104 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6105 coal_cap->num_cmpl_dma_aggr_max = 63;
6106 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6107 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6108 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6109 coal_cap->int_lat_tmr_min_max = 65535;
6110 coal_cap->int_lat_tmr_max_max = 65535;
6111 coal_cap->num_cmpl_aggr_int_max = 65535;
6112 coal_cap->timer_units = 80;
6114 if (bp->hwrm_spec_code < 0x10902)
6115 return;
6117 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6118 mutex_lock(&bp->hwrm_cmd_lock);
6119 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6120 if (!rc) {
6121 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6122 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6123 coal_cap->num_cmpl_dma_aggr_max =
6124 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6125 coal_cap->num_cmpl_dma_aggr_during_int_max =
6126 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6127 coal_cap->cmpl_aggr_dma_tmr_max =
6128 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6129 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6130 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6131 coal_cap->int_lat_tmr_min_max =
6132 le16_to_cpu(resp->int_lat_tmr_min_max);
6133 coal_cap->int_lat_tmr_max_max =
6134 le16_to_cpu(resp->int_lat_tmr_max_max);
6135 coal_cap->num_cmpl_aggr_int_max =
6136 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6137 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6138 }
6139 mutex_unlock(&bp->hwrm_cmd_lock);
6140 }
6142 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6143 {
6144 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6146 return usec * 1000 / coal_cap->timer_units;
6147 }
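/* Editor's note (illustrative, not part of the original source): with
 * the default timer_units of 80 (i.e. 80 ns per tick), a 6 usec
 * coalescing interval converts to 6 * 1000 / 80 = 75 hardware ticks.
 */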
6149 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6150 struct bnxt_coal *hw_coal,
6151 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6152 {
6153 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6154 u32 cmpl_params = coal_cap->cmpl_params;
6155 u16 val, tmr, max, flags = 0;
6157 max = hw_coal->bufs_per_record * 128;
6158 if (hw_coal->budget)
6159 max = hw_coal->bufs_per_record * hw_coal->budget;
6160 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6162 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6163 req->num_cmpl_aggr_int = cpu_to_le16(val);
6165 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6166 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6168 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6169 coal_cap->num_cmpl_dma_aggr_during_int_max);
6170 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6172 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6173 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6174 req->int_lat_tmr_max = cpu_to_le16(tmr);
6176 /* min timer set to 1/2 of interrupt timer */
6177 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6178 val = tmr / 2;
6179 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6180 req->int_lat_tmr_min = cpu_to_le16(val);
6181 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6182 }
6184 /* buf timer set to 1/4 of interrupt timer */
6185 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6186 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6188 if (cmpl_params &
6189 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6190 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6191 val = clamp_t(u16, tmr, 1,
6192 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6193 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6194 req->enables |=
6195 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6196 }
6198 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6199 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6200 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6201 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6202 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6203 req->flags = cpu_to_le16(flags);
6204 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6205 }
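/* Editor's note (illustrative, not part of the original source): the
 * helper above derives the secondary timers from the interrupt latency
 * timer "tmr": the minimum latency timer is tmr / 2 and the DMA buffer
 * timer is tmr / 4, each clamped to the limits that were reported by
 * HWRM_RING_AGGINT_QCAPS.
 */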
6207 /* Caller holds bp->hwrm_cmd_lock */
6208 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6209 struct bnxt_coal *hw_coal)
6210 {
6211 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6212 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6213 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6214 u32 nq_params = coal_cap->nq_params;
6215 u16 tmr;
6217 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6218 return 0;
6220 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6221 -1, -1);
6222 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6223 req.flags =
6224 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6226 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6227 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6228 req.int_lat_tmr_min = cpu_to_le16(tmr);
6229 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6230 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6231 }
6233 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6234 {
6235 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6236 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6237 struct bnxt_coal coal;
6239 /* Tick values in micro seconds.
6240 * 1 coal_buf x bufs_per_record = 1 completion record.
6241 */
6242 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6244 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6245 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6247 if (!bnapi->rx_ring)
6248 return 0;
6250 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6251 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6253 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6255 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6257 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6258 HWRM_CMD_TIMEOUT);
6259 }
6261 int bnxt_hwrm_set_coal(struct bnxt *bp)
6262 {
6263 int i, rc = 0;
6264 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6265 req_tx = {0}, *req;
6267 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6268 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6269 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6270 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6272 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6273 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6275 mutex_lock(&bp->hwrm_cmd_lock);
6276 for (i = 0; i < bp->cp_nr_rings; i++) {
6277 struct bnxt_napi *bnapi = bp->bnapi[i];
6278 struct bnxt_coal *hw_coal;
6279 u16 ring_id;
6281 req = &req_rx;
6282 if (!bnapi->rx_ring) {
6283 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6284 req = &req_tx;
6285 } else {
6286 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6287 }
6288 req->ring_id = cpu_to_le16(ring_id);
6290 rc = _hwrm_send_message(bp, req, sizeof(*req),
6291 HWRM_CMD_TIMEOUT);
6292 if (rc)
6293 break;
6295 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6296 continue;
6298 if (bnapi->rx_ring && bnapi->tx_ring) {
6299 req = &req_tx;
6300 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6301 req->ring_id = cpu_to_le16(ring_id);
6302 rc = _hwrm_send_message(bp, req, sizeof(*req),
6303 HWRM_CMD_TIMEOUT);
6304 if (rc)
6305 break;
6306 }
6307 if (bnapi->rx_ring)
6308 hw_coal = &bp->rx_coal;
6309 else
6310 hw_coal = &bp->tx_coal;
6311 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6312 }
6313 mutex_unlock(&bp->hwrm_cmd_lock);
6314 return rc;
6315 }
6317 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6318 {
6319 int rc = 0, i;
6320 struct hwrm_stat_ctx_free_input req = {0};
6322 if (!bp->bnapi)
6323 return 0;
6325 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6326 return 0;
6328 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6330 mutex_lock(&bp->hwrm_cmd_lock);
6331 for (i = 0; i < bp->cp_nr_rings; i++) {
6332 struct bnxt_napi *bnapi = bp->bnapi[i];
6333 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6335 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6336 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6338 rc = _hwrm_send_message(bp, &req, sizeof(req),
6339 HWRM_CMD_TIMEOUT);
6341 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6342 }
6343 }
6344 mutex_unlock(&bp->hwrm_cmd_lock);
6346 return rc;
6347 }
6348 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6349 {
6350 int rc = 0, i;
6351 struct hwrm_stat_ctx_alloc_input req = {0};
6352 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6354 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6355 return 0;
6357 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6359 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6360 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6362 mutex_lock(&bp->hwrm_cmd_lock);
6363 for (i = 0; i < bp->cp_nr_rings; i++) {
6364 struct bnxt_napi *bnapi = bp->bnapi[i];
6365 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6367 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6369 rc = _hwrm_send_message(bp, &req, sizeof(req),
6370 HWRM_CMD_TIMEOUT);
6371 if (rc)
6372 break;
6374 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6376 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6377 }
6378 mutex_unlock(&bp->hwrm_cmd_lock);
6379 return rc;
6380 }
6382 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6383 {
6384 struct hwrm_func_qcfg_input req = {0};
6385 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6386 u16 flags;
6387 int rc;
6389 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6390 req.fid = cpu_to_le16(0xffff);
6391 mutex_lock(&bp->hwrm_cmd_lock);
6392 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6393 if (rc)
6394 goto func_qcfg_exit;
6396 #ifdef CONFIG_BNXT_SRIOV
6397 if (BNXT_VF(bp)) {
6398 struct bnxt_vf_info *vf = &bp->vf;
6400 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6401 } else {
6402 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6403 }
6404 #endif
6405 flags = le16_to_cpu(resp->flags);
6406 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6407 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6408 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6409 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6410 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6411 }
6412 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6413 bp->flags |= BNXT_FLAG_MULTI_HOST;
6415 switch (resp->port_partition_type) {
6416 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6417 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6418 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6419 bp->port_partition_type = resp->port_partition_type;
6420 break;
6421 }
6422 if (bp->hwrm_spec_code < 0x10707 ||
6423 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6424 bp->br_mode = BRIDGE_MODE_VEB;
6425 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6426 bp->br_mode = BRIDGE_MODE_VEPA;
6427 else
6428 bp->br_mode = BRIDGE_MODE_UNDEF;
6430 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6431 if (!bp->max_mtu)
6432 bp->max_mtu = BNXT_MAX_MTU;
6434 func_qcfg_exit:
6435 mutex_unlock(&bp->hwrm_cmd_lock);
6436 return rc;
6437 }
6439 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6440 {
6441 struct hwrm_func_backing_store_qcaps_input req = {0};
6442 struct hwrm_func_backing_store_qcaps_output *resp =
6443 bp->hwrm_cmd_resp_addr;
6444 int rc;
6446 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6447 return 0;
6449 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6450 mutex_lock(&bp->hwrm_cmd_lock);
6451 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6452 if (!rc) {
6453 struct bnxt_ctx_pg_info *ctx_pg;
6454 struct bnxt_ctx_mem_info *ctx;
6455 int i;
6457 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6458 if (!ctx) {
6459 rc = -ENOMEM;
6460 goto ctx_err;
6461 }
6462 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6463 if (!ctx_pg) {
6464 kfree(ctx);
6465 rc = -ENOMEM;
6466 goto ctx_err;
6467 }
6468 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6469 ctx->tqm_mem[i] = ctx_pg;
6471 bp->ctx = ctx;
6472 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6473 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6474 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6475 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6476 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6477 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6478 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6479 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6480 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6481 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6482 ctx->vnic_max_vnic_entries =
6483 le16_to_cpu(resp->vnic_max_vnic_entries);
6484 ctx->vnic_max_ring_table_entries =
6485 le16_to_cpu(resp->vnic_max_ring_table_entries);
6486 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6487 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6488 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6489 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6490 ctx->tqm_min_entries_per_ring =
6491 le32_to_cpu(resp->tqm_min_entries_per_ring);
6492 ctx->tqm_max_entries_per_ring =
6493 le32_to_cpu(resp->tqm_max_entries_per_ring);
6494 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6495 if (!ctx->tqm_entries_multiple)
6496 ctx->tqm_entries_multiple = 1;
6497 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6498 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6499 ctx->mrav_num_entries_units =
6500 le16_to_cpu(resp->mrav_num_entries_units);
6501 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6502 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6503 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6504 } else {
6505 rc = 0;
6506 }
6507 ctx_err:
6508 mutex_unlock(&bp->hwrm_cmd_lock);
6509 return rc;
6510 }
6512 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6513 __le64 *pg_dir)
6514 {
6515 u8 pg_size = 0;
6517 if (BNXT_PAGE_SHIFT == 13)
6518 pg_size = 1 << 4;
6519 else if (BNXT_PAGE_SHIFT == 16)
6520 pg_size = 2 << 4;
6522 *pg_attr = pg_size;
6523 if (rmem->depth >= 1) {
6524 if (rmem->depth == 2)
6525 *pg_attr |= 2;
6526 else
6527 *pg_attr |= 1;
6528 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6529 } else {
6530 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6531 }
6532 }
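/* Editor's note (illustrative, not part of the original source): pg_attr
 * packs the backing-store page size into the upper bits (0 = 4K, 1 = 8K,
 * 2 = 64K pages) and the page-table depth into the low bits (0 = direct,
 * 1 = one level of indirection, 2 = two levels).
 */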
6534 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6535 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6536 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6537 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6538 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6539 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6541 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6542 {
6543 struct hwrm_func_backing_store_cfg_input req = {0};
6544 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6545 struct bnxt_ctx_pg_info *ctx_pg;
6546 __le32 *num_entries;
6547 __le64 *pg_dir;
6548 u32 flags = 0;
6549 u8 *pg_attr;
6550 u32 ena;
6551 int i, rc;
6553 if (!ctx)
6554 return 0;
6556 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6557 req.enables = cpu_to_le32(enables);
6559 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6560 ctx_pg = &ctx->qp_mem;
6561 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6562 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6563 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6564 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6565 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6566 &req.qpc_pg_size_qpc_lvl,
6567 &req.qpc_page_dir);
6568 }
6569 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6570 ctx_pg = &ctx->srq_mem;
6571 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6572 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6573 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6574 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6575 &req.srq_pg_size_srq_lvl,
6576 &req.srq_page_dir);
6577 }
6578 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6579 ctx_pg = &ctx->cq_mem;
6580 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6581 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6582 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6583 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6584 &req.cq_page_dir);
6585 }
6586 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6587 ctx_pg = &ctx->vnic_mem;
6588 req.vnic_num_vnic_entries =
6589 cpu_to_le16(ctx->vnic_max_vnic_entries);
6590 req.vnic_num_ring_table_entries =
6591 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6592 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6593 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6594 &req.vnic_pg_size_vnic_lvl,
6595 &req.vnic_page_dir);
6596 }
6597 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6598 ctx_pg = &ctx->stat_mem;
6599 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6600 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6601 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6602 &req.stat_pg_size_stat_lvl,
6603 &req.stat_page_dir);
6604 }
6605 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6606 ctx_pg = &ctx->mrav_mem;
6607 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6608 if (ctx->mrav_num_entries_units)
6609 flags |=
6610 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6611 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6612 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6613 &req.mrav_pg_size_mrav_lvl,
6614 &req.mrav_page_dir);
6615 }
6616 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6617 ctx_pg = &ctx->tim_mem;
6618 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6619 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6620 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6621 &req.tim_pg_size_tim_lvl,
6622 &req.tim_page_dir);
6623 }
6624 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6625 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6626 pg_dir = &req.tqm_sp_page_dir,
6627 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6628 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6629 if (!(enables & ena))
6630 continue;
6632 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6633 ctx_pg = ctx->tqm_mem[i];
6634 *num_entries = cpu_to_le32(ctx_pg->entries);
6635 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6636 }
6637 req.flags = cpu_to_le32(flags);
6638 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6639 return rc;
6640 }
6642 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6643 struct bnxt_ctx_pg_info *ctx_pg)
6644 {
6645 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6647 rmem->page_size = BNXT_PAGE_SIZE;
6648 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6649 rmem->dma_arr = ctx_pg->ctx_dma_arr;
6650 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6651 if (rmem->depth >= 1)
6652 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6653 return bnxt_alloc_ring(bp, rmem);
6656 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6657 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6658 u8 depth, bool use_init_val)
6660 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6666 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6667 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6668 ctx_pg->nr_pages = 0;
6671 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
6679 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6680 rmem->nr_pages = nr_tbls;
6681 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6684 for (i = 0; i < nr_tbls; i++) {
6685 struct bnxt_ctx_pg_info *pg_tbl;
6687 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6690 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6691 rmem = &pg_tbl->ring_mem;
6692 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6693 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6695 rmem->nr_pages = MAX_CTX_PAGES;
6697 rmem->init_val = bp->ctx->ctx_kind_initializer;
6698 if (i == (nr_tbls - 1)) {
6699 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6702 rmem->nr_pages = rem;
6704 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6709 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		if (use_init_val)
			rmem->init_val = bp->ctx->ctx_kind_initializer;
6714 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
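/*
 * Illustrative sketch, not part of the upstream driver: how the one- vs
 * two-level page-table decision above plays out.  Assuming a 4K
 * BNXT_PAGE_SIZE and MAX_CTX_PAGES of 512, a 16MB context needs 4096
 * pages, which exceeds MAX_CTX_PAGES and forces depth 2 with
 * DIV_ROUND_UP(4096, 512) = 8 page-directory entries.
 */
static inline u32 bnxt_example_nr_pg_tbls(u32 mem_size)
{
	u32 nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);

	if (nr_pages <= MAX_CTX_PAGES)	/* one level is enough */
		return 0;
	return DIV_ROUND_UP(nr_pages, MAX_CTX_PAGES);
}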
6719 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6720 struct bnxt_ctx_pg_info *ctx_pg)
6722 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6724 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6725 ctx_pg->ctx_pg_tbl) {
6726 int i, nr_tbls = rmem->nr_pages;
6728 for (i = 0; i < nr_tbls; i++) {
6729 struct bnxt_ctx_pg_info *pg_tbl;
6730 struct bnxt_ring_mem_info *rmem2;
6732 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6735 rmem2 = &pg_tbl->ring_mem;
6736 bnxt_free_ring(bp, rmem2);
6737 ctx_pg->ctx_pg_arr[i] = NULL;
6739 ctx_pg->ctx_pg_tbl[i] = NULL;
6741 kfree(ctx_pg->ctx_pg_tbl);
6742 ctx_pg->ctx_pg_tbl = NULL;
6744 bnxt_free_ring(bp, rmem);
6745 ctx_pg->nr_pages = 0;
6748 static void bnxt_free_ctx_mem(struct bnxt *bp)
6750 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6756 if (ctx->tqm_mem[0]) {
6757 for (i = 0; i < bp->max_q + 1; i++)
6758 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6759 kfree(ctx->tqm_mem[0]);
6760 ctx->tqm_mem[0] = NULL;
6763 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6764 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6765 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6766 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6767 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6768 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6769 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6770 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6773 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6775 struct bnxt_ctx_pg_info *ctx_pg;
6776 struct bnxt_ctx_mem_info *ctx;
	u32 mem_size, ena, entries;
	u32 num_mr, num_ah;
	u32 extra_qps = 0;
	u32 extra_srqs = 0;
	u8 pg_lvl = 1;
	int i, rc;
	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
6791 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
		pg_lvl = 2;
		extra_qps = 65536;
		extra_srqs = 8192;
	}
6800 ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
			  extra_qps;
6803 mem_size = ctx->qp_entry_size * ctx_pg->entries;
6804 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6808 ctx_pg = &ctx->srq_mem;
6809 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6810 mem_size = ctx->srq_entry_size * ctx_pg->entries;
6811 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6815 ctx_pg = &ctx->cq_mem;
6816 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6817 mem_size = ctx->cq_entry_size * ctx_pg->entries;
6818 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6822 ctx_pg = &ctx->vnic_mem;
6823 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6824 ctx->vnic_max_ring_table_entries;
6825 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6826 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
6830 ctx_pg = &ctx->stat_mem;
6831 ctx_pg->entries = ctx->stat_max_entries;
6832 mem_size = ctx->stat_entry_size * ctx_pg->entries;
6833 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
6838 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6841 ctx_pg = &ctx->mrav_mem;
6842 /* 128K extra is needed to accommodate static AH context
6843 * allocation by f/w.
6845 num_mr = 1024 * 256;
6846 num_ah = 1024 * 128;
6847 ctx_pg->entries = num_mr + num_ah;
6848 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6849 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
6852 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
	if (ctx->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctx->mrav_num_entries_units) << 16) |
			 (num_ah / ctx->mrav_num_entries_units);
6858 ctx_pg = &ctx->tim_mem;
6859 ctx_pg->entries = ctx->qp_mem.entries;
6860 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6861 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
6864 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6867 entries = ctx->qp_max_l2_entries + extra_qps;
6868 entries = roundup(entries, ctx->tqm_entries_multiple);
6869 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6870 ctx->tqm_max_entries_per_ring);
6871 for (i = 0; i < bp->max_q + 1; i++) {
6872 ctx_pg = ctx->tqm_mem[i];
6873 ctx_pg->entries = entries;
6874 mem_size = ctx->tqm_entry_size * entries;
6875 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
6878 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6880 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
	if (rc) {
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
		return rc;
	}
6886 ctx->flags |= BNXT_CTX_FLAG_INITED;
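/*
 * Illustrative sketch, not part of the upstream driver: the TQM sizing
 * above rounds the L2 entry count up to the firmware's multiple and then
 * clamps it into the advertised [min, max] window.  With a hypothetical
 * multiple of 32 and a window of [256, 4096], a request for 1000 entries
 * becomes roundup(1000, 32) = 1024.
 */
static inline u32 bnxt_example_tqm_entries(u32 want, u32 mult, u32 min, u32 max)
{
	return clamp_t(u32, roundup(want, mult), min, max);
}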
6891 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6893 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6894 struct hwrm_func_resource_qcaps_input req = {0};
6895 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6898 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6899 req.fid = cpu_to_le16(0xffff);
6901 mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
				       HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;
6907 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	if (!all)
		goto hwrm_func_resc_qcaps_exit;
6911 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6912 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6913 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6914 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6915 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6916 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6917 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6918 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6919 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6920 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6921 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6922 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6923 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6924 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6925 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6926 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6928 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6929 u16 max_msix = le16_to_cpu(resp->max_msix);
6931 hw_resc->max_nqs = max_msix;
6932 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6936 struct bnxt_pf_info *pf = &bp->pf;
6938 pf->vf_resv_strategy =
6939 le16_to_cpu(resp->vf_reservation_strategy);
6940 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6941 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6943 hwrm_func_resc_qcaps_exit:
6944 mutex_unlock(&bp->hwrm_cmd_lock);
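/*
 * Illustrative sketch, not part of the upstream driver: each resource in
 * the response above comes back as a [min, max] window, so a hypothetical
 * caller sizing a reservation clamps its request into that window:
 */
static inline u16 bnxt_example_fit_resc(u16 want, u16 min, u16 max)
{
	return clamp_t(u16, want, min, max);
}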
6948 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6951 struct hwrm_func_qcaps_input req = {0};
6952 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6953 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6956 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6957 req.fid = cpu_to_le16(0xffff);
6959 mutex_lock(&bp->hwrm_cmd_lock);
6960 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;
6964 flags = le32_to_cpu(resp->flags);
6965 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6966 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6967 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6968 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6969 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6970 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6971 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
6972 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6973 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6974 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
6975 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6976 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
6977 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
6978 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
6980 bp->tx_push_thresh = 0;
6981 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6982 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6984 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6985 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6986 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6987 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6988 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6989 if (!hw_resc->max_hw_ring_grps)
6990 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6991 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6992 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6993 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6996 struct bnxt_pf_info *pf = &bp->pf;
6998 pf->fw_fid = le16_to_cpu(resp->fid);
6999 pf->port_id = le16_to_cpu(resp->port_id);
7000 bp->dev->dev_port = pf->port_id;
7001 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7002 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7003 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7004 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7005 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7006 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7007 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7008 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7009 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7010 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7011 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7012 bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}
7022 hwrm_func_qcaps_exit:
7023 mutex_unlock(&bp->hwrm_cmd_lock);
7027 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7029 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
		return rc;
	}
7041 if (bp->hwrm_spec_code >= 0x10803) {
7042 rc = bnxt_alloc_ctx_mem(bp);
7045 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7052 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7054 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7055 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7059 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7062 resp = bp->hwrm_cmd_resp_addr;
7063 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7065 mutex_lock(&bp->hwrm_cmd_lock);
7066 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_cfa_adv_qcaps_exit;
7070 flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7073 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7075 hwrm_cfa_adv_qcaps_exit:
7076 mutex_unlock(&bp->hwrm_cmd_lock);
7080 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7082 struct bnxt_fw_health *fw_health = bp->fw_health;
7083 u32 reg_base = 0xffffffff;
7086 /* Only pre-map the monitoring GRC registers using window 3 */
7087 for (i = 0; i < 4; i++) {
7088 u32 reg = fw_health->regs[i];
		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
7092 if (reg_base == 0xffffffff)
7093 reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
7096 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7097 (reg & BNXT_GRC_OFFSET_MASK);
	if (reg_base == 0xffffffff)
		return 0;
7102 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7103 BNXT_FW_HEALTH_WIN_MAP_OFF);
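/*
 * Illustrative sketch, not part of the upstream driver: a GRC register
 * address splits into a window-aligned base (BNXT_GRC_BASE_MASK), which
 * is programmed into the window register once, and a low offset
 * (BNXT_GRC_OFFSET_MASK) added to the window's fixed BAR location, as the
 * loop above does for each health register:
 */
static inline u32 bnxt_example_health_win_addr(u32 reg)
{
	return BNXT_FW_HEALTH_WIN_BASE + (reg & BNXT_GRC_OFFSET_MASK);
}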
7107 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7109 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7110 struct bnxt_fw_health *fw_health = bp->fw_health;
7111 struct hwrm_error_recovery_qcfg_input req = {0};
7114 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7117 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7118 mutex_lock(&bp->hwrm_cmd_lock);
7119 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto err_recovery_out;
7122 fw_health->flags = le32_to_cpu(resp->flags);
7123 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
7128 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7129 fw_health->master_func_wait_dsecs =
7130 le32_to_cpu(resp->master_func_wait_period);
7131 fw_health->normal_func_wait_dsecs =
7132 le32_to_cpu(resp->normal_func_wait_period);
7133 fw_health->post_reset_wait_dsecs =
7134 le32_to_cpu(resp->master_func_wait_period_after_reset);
7135 fw_health->post_reset_max_wait_dsecs =
7136 le32_to_cpu(resp->max_bailout_time_after_reset);
7137 fw_health->regs[BNXT_FW_HEALTH_REG] =
7138 le32_to_cpu(resp->fw_health_status_reg);
7139 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7140 le32_to_cpu(resp->fw_heartbeat_reg);
7141 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7142 le32_to_cpu(resp->fw_reset_cnt_reg);
7143 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7144 le32_to_cpu(resp->reset_inprogress_reg);
7145 fw_health->fw_reset_inprog_reg_mask =
7146 le32_to_cpu(resp->reset_inprogress_reg_mask);
7147 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
7152 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7153 fw_health->fw_reset_seq_regs[i] =
7154 le32_to_cpu(resp->reset_reg[i]);
7155 fw_health->fw_reset_seq_vals[i] =
7156 le32_to_cpu(resp->reset_reg_val[i]);
7157 fw_health->fw_reset_seq_delay_msec[i] =
7158 resp->delay_after_reset[i];
err_recovery_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
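/*
 * Illustrative sketch, not part of the upstream driver: the *_dsecs fields
 * captured above are in deciseconds (100ms units), so a hypothetical
 * conversion to jiffies goes through milliseconds:
 */
static inline unsigned long bnxt_example_dsecs_to_jiffies(u32 dsecs)
{
	return msecs_to_jiffies(dsecs * 100);
}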
7169 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7171 struct hwrm_func_reset_input req = {0};
7173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7176 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7179 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7182 struct hwrm_queue_qportcfg_input req = {0};
7183 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7187 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7189 mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
7198 bp->max_tc = resp->max_configurable_queues;
7199 bp->max_lltc = resp->max_configurable_lossless_queues;
7200 if (bp->max_tc > BNXT_MAX_QUEUE)
7201 bp->max_tc = BNXT_MAX_QUEUE;
7203 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7204 qptr = &resp->queue_id0;
7205 for (i = 0, j = 0; i < bp->max_tc; i++) {
7206 bp->q_info[j].queue_id = *qptr;
7207 bp->q_ids[i] = *qptr++;
7208 bp->q_info[j].queue_profile = *qptr++;
7209 bp->tc_to_qidx[j] = j;
7210 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7211 (no_rdma && BNXT_PF(bp)))
7214 bp->max_q = bp->max_tc;
7215 bp->max_tc = max_t(u8, j, 1);
7217 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7220 if (bp->max_lltc > bp->max_tc)
7221 bp->max_lltc = bp->max_tc;
qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
7228 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7230 struct hwrm_ver_get_input req = {0};
7233 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7234 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7235 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7236 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
				   silent);
	return rc;
7243 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7245 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7249 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7250 mutex_lock(&bp->hwrm_cmd_lock);
7251 rc = __bnxt_hwrm_ver_get(bp, false);
	if (rc)
		goto hwrm_ver_get_exit;
7255 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7257 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7258 resp->hwrm_intf_min_8b << 8 |
7259 resp->hwrm_intf_upd_8b;
7260 if (resp->hwrm_intf_maj_8b < 1) {
7261 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7262 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7263 resp->hwrm_intf_upd_8b);
7264 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7266 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
7267 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7268 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
7270 if (strlen(resp->active_pkg_name)) {
7271 int fw_ver_len = strlen(bp->fw_ver_str);
7273 snprintf(bp->fw_ver_str + fw_ver_len,
7274 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7275 resp->active_pkg_name);
7276 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7279 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7280 if (!bp->hwrm_cmd_timeout)
7281 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7283 if (resp->hwrm_intf_maj_8b >= 1) {
7284 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7285 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7287 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7288 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7290 bp->chip_num = le16_to_cpu(resp->chip_num);
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
7293 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7295 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7296 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7297 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7298 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7300 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7301 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7305 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7309 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7313 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7316 mutex_unlock(&bp->hwrm_cmd_lock);
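/*
 * Illustrative sketch, not part of the upstream driver: hwrm_spec_code
 * packs major/minor/update into one comparable integer, which is why
 * feature gates elsewhere read as plain comparisons, e.g.
 * "bp->hwrm_spec_code >= 0x10803" for interface 1.8.3:
 */
static inline u32 bnxt_example_spec_code(u8 maj, u8 min, u8 upd)
{
	return maj << 16 | min << 8 | upd;
}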
7320 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7322 struct hwrm_fw_set_time_input req = {0};
	struct tm tm;
	time64_t now = ktime_get_real_seconds();
7326 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7327 bp->hwrm_spec_code < 0x10400)
7330 time64_to_tm(now, 0, &tm);
7331 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7332 req.year = cpu_to_le16(1900 + tm.tm_year);
7333 req.month = 1 + tm.tm_mon;
7334 req.day = tm.tm_mday;
7335 req.hour = tm.tm_hour;
7336 req.minute = tm.tm_min;
7337 req.second = tm.tm_sec;
7338 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
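/*
 * Illustrative sketch, not part of the upstream driver: time64_to_tm()
 * yields years counted from 1900 and zero-based months, hence the
 * "1900 +" and "1 +" adjustments above.
 */
static inline long bnxt_example_fw_year(time64_t now)
{
	struct tm tm;

	time64_to_tm(now, 0, &tm);
	return 1900 + tm.tm_year;	/* e.g. 119 -> year 2019 */
}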
7341 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7344 struct bnxt_pf_info *pf = &bp->pf;
7345 struct hwrm_port_qstats_input req = {0};
7347 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7350 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7351 req.port_id = cpu_to_le16(pf->port_id);
7352 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7353 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7354 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7358 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7360 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7361 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7362 struct hwrm_port_qstats_ext_input req = {0};
7363 struct bnxt_pf_info *pf = &bp->pf;
7367 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7370 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7371 req.port_id = cpu_to_le16(pf->port_id);
7372 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7373 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
7374 tx_stat_size = bp->hw_tx_port_stats_ext ?
7375 sizeof(*bp->hw_tx_port_stats_ext) : 0;
7376 req.tx_stat_size = cpu_to_le16(tx_stat_size);
7377 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7378 mutex_lock(&bp->hwrm_cmd_lock);
7379 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7381 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7382 bp->fw_tx_stats_ext_size = tx_stat_size ?
7383 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7385 bp->fw_rx_stats_ext_size = 0;
7386 bp->fw_tx_stats_ext_size = 0;
7388 if (bp->fw_tx_stats_ext_size <=
7389 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7390 mutex_unlock(&bp->hwrm_cmd_lock);
7391 bp->pri2cos_valid = 0;
7395 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7396 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7398 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7400 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7404 resp2 = bp->hwrm_cmd_resp_addr;
7405 pri2cos = &resp2->pri0_cos_queue_id;
7406 for (i = 0; i < 8; i++) {
7407 u8 queue_id = pri2cos[i];
7409 for (j = 0; j < bp->max_q; j++) {
7410 if (bp->q_ids[j] == queue_id)
7414 bp->pri2cos_valid = 1;
7416 mutex_unlock(&bp->hwrm_cmd_lock);
7420 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7422 struct hwrm_pcie_qstats_input req = {0};
7424 if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7427 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7428 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7429 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7430 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7433 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7435 if (bp->vxlan_port_cnt) {
7436 bnxt_hwrm_tunnel_dst_port_free(
7437 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7439 bp->vxlan_port_cnt = 0;
7440 if (bp->nge_port_cnt) {
7441 bnxt_hwrm_tunnel_dst_port_free(
7442 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7444 bp->nge_port_cnt = 0;
7447 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7453 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7454 else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7456 for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
7467 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7471 for (i = 0; i < bp->nr_vnics; i++)
7472 bnxt_hwrm_vnic_set_rss(bp, i, false);
7475 static void bnxt_clear_vnic(struct bnxt *bp)
7480 bnxt_hwrm_clear_vnic_filter(bp);
7481 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
		/* clear all RSS settings before freeing the vnic ctx */
7483 bnxt_hwrm_clear_vnic_rss(bp);
7484 bnxt_hwrm_vnic_ctx_free(bp);
	/* before freeing the vnic, undo the vnic TPA settings */
7487 if (bp->flags & BNXT_FLAG_TPA)
7488 bnxt_set_tpa(bp, false);
7489 bnxt_hwrm_vnic_free(bp);
7490 if (bp->flags & BNXT_FLAG_CHIP_P5)
7491 bnxt_hwrm_vnic_ctx_free(bp);
7494 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7497 bnxt_clear_vnic(bp);
7498 bnxt_hwrm_ring_free(bp, close_path);
7499 bnxt_hwrm_ring_grp_free(bp);
7501 bnxt_hwrm_stat_ctx_free(bp);
7502 bnxt_hwrm_free_tunnel_ports(bp);
7506 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7508 struct hwrm_func_cfg_input req = {0};
7511 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7512 req.fid = cpu_to_le16(0xffff);
7513 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7514 if (br_mode == BRIDGE_MODE_VEB)
7515 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7516 else if (br_mode == BRIDGE_MODE_VEPA)
7517 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7520 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
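/*
 * Illustrative sketch, not part of the upstream driver: a hypothetical
 * caller switching the embedded bridge to VEPA mode.
 *
 *	rc = bnxt_hwrm_set_br_mode(bp, BRIDGE_MODE_VEPA);
 *	if (rc)
 *		netdev_warn(bp->dev, "failed to set VEPA mode\n");
 */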
7524 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7526 struct hwrm_func_cfg_input req = {0};
7529 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7532 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7533 req.fid = cpu_to_le16(0xffff);
7534 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7535 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
	if (size == 128)
		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7539 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7543 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7545 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;
7551 /* allocate context for vnic */
7552 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
7558 bp->rsscos_nr_ctxs++;
7560 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7561 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
7567 bp->rsscos_nr_ctxs++;
skip_rss_ctx:
	/* configure default vnic, ring grp */
7572 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
7579 /* Enable RSS hashing on vnic */
7580 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
7587 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7588 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
7599 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7603 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7604 for (i = 0; i < nr_ctxs; i++) {
7605 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
				   vnic_id, i, rc);
			break;
7611 bp->rsscos_nr_ctxs++;
7616 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic_id, rc);
		return rc;
	}
7622 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		return rc;
	}
7628 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7629 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
7638 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7640 if (bp->flags & BNXT_FLAG_CHIP_P5)
7641 return __bnxt_setup_vnic_p5(bp, vnic_id);
7643 return __bnxt_setup_vnic(bp, vnic_id);
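/*
 * Illustrative note, not part of the upstream driver: on P5 chips each
 * RSS context covers up to 64 RX rings, so __bnxt_setup_vnic_p5() above
 * allocates DIV_ROUND_UP(rx_nr_rings, 64) contexts, e.g. two contexts
 * for 100 RX rings.
 */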
7646 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7648 #ifdef CONFIG_RFS_ACCEL
7651 if (bp->flags & BNXT_FLAG_CHIP_P5)
7654 for (i = 0; i < bp->rx_nr_rings; i++) {
7655 struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;
7659 if (vnic_id >= bp->nr_vnics)
7662 vnic = &bp->vnic_info[vnic_id];
7663 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7664 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7665 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7666 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
7672 rc = bnxt_setup_vnic(bp, vnic_id);
7682 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7683 static bool bnxt_promisc_ok(struct bnxt *bp)
7685 #ifdef CONFIG_BNXT_SRIOV
7686 if (BNXT_VF(bp) && !bp->vf.vlan)
7692 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7694 unsigned int rc = 0;
7696 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
7703 rc = bnxt_hwrm_vnic_cfg(bp, 1);
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
7712 static int bnxt_cfg_rx_mode(struct bnxt *);
7713 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7715 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7717 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7719 unsigned int rx_nr_rings = bp->rx_nr_rings;
7722 rc = bnxt_hwrm_stat_ctx_alloc(bp);
		netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
			   rc);
7730 rc = bnxt_hwrm_ring_alloc(bp);
7732 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7736 rc = bnxt_hwrm_ring_grp_alloc(bp);
7738 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7742 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7745 /* default vnic 0 */
7746 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7748 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7752 rc = bnxt_setup_vnic(bp, 0);
7756 if (bp->flags & BNXT_FLAG_RFS) {
7757 rc = bnxt_alloc_rfs_vnics(bp);
7762 if (bp->flags & BNXT_FLAG_TPA) {
7763 rc = bnxt_set_tpa(bp, true);
7769 bnxt_update_vf_mac(bp);
7771 /* Filter for default vnic 0 */
7772 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7774 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7777 vnic->uc_filter_count = 1;
7780 if (bp->dev->flags & IFF_BROADCAST)
7781 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7783 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7784 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7786 if (bp->dev->flags & IFF_ALLMULTI) {
7787 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7788 vnic->mc_list_count = 0;
7792 bnxt_mc_list_updated(bp, &mask);
7793 vnic->rx_mask |= mask;
7796 rc = bnxt_cfg_rx_mode(bp);
7800 rc = bnxt_hwrm_set_coal(bp);
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);
7805 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7806 rc = bnxt_setup_nitroa0_vnic(bp);
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
7813 bnxt_hwrm_func_qcfg(bp);
7814 netdev_update_features(bp->dev);
7820 bnxt_hwrm_resource_free(bp, 0, true);
7825 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7827 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7831 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7833 bnxt_init_cp_rings(bp);
7834 bnxt_init_rx_rings(bp);
7835 bnxt_init_tx_rings(bp);
7836 bnxt_init_ring_grps(bp, irq_re_init);
7837 bnxt_init_vnics(bp);
7839 return bnxt_init_chip(bp, irq_re_init);
7842 static int bnxt_set_real_num_queues(struct bnxt *bp)
7845 struct net_device *dev = bp->dev;
7847 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7848 bp->tx_nr_rings_xdp);
7852 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7856 #ifdef CONFIG_RFS_ACCEL
7857 if (bp->flags & BNXT_FLAG_RFS)
7858 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}
7888 static void bnxt_setup_msix(struct bnxt *bp)
7890 const int len = sizeof(bp->irq_tbl[0].name);
7891 struct net_device *dev = bp->dev;
7894 tcs = netdev_get_num_tc(dev);
7898 for (i = 0; i < tcs; i++) {
7899 count = bp->tx_nr_rings_per_tc;
7901 netdev_set_tc_queue(dev, i, count, off);
7905 for (i = 0; i < bp->cp_nr_rings; i++) {
7906 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
			 attr, i);
7918 bp->irq_tbl[map_idx].handler = bnxt_msix;
7922 static void bnxt_setup_inta(struct bnxt *bp)
7924 const int len = sizeof(bp->irq_tbl[0].name);
7926 if (netdev_get_num_tc(bp->dev))
7927 netdev_reset_tc(bp->dev);
	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
7931 bp->irq_tbl[0].handler = bnxt_inta;
7934 static int bnxt_setup_int_mode(struct bnxt *bp)
7938 if (bp->flags & BNXT_FLAG_USING_MSIX)
7939 bnxt_setup_msix(bp);
7941 bnxt_setup_inta(bp);
7943 rc = bnxt_set_real_num_queues(bp);
7947 #ifdef CONFIG_RFS_ACCEL
7948 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7950 return bp->hw_resc.max_rsscos_ctxs;
7953 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7955 return bp->hw_resc.max_vnics;
7959 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7961 return bp->hw_resc.max_stat_ctxs;
7964 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7966 return bp->hw_resc.max_cp_rings;
7969 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7971 unsigned int cp = bp->hw_resc.max_cp_rings;
7973 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7974 cp -= bnxt_get_ulp_msix_num(bp);
7979 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7981 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7983 if (bp->flags & BNXT_FLAG_CHIP_P5)
7984 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7986 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7989 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7991 bp->hw_resc.max_irqs = max_irqs;
7994 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7998 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7999 if (bp->flags & BNXT_FLAG_CHIP_P5)
8000 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8002 return cp - bp->cp_nr_rings;
8005 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8007 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8010 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8012 int max_cp = bnxt_get_max_func_cp_rings(bp);
8013 int max_irq = bnxt_get_max_func_irqs(bp);
8014 int total_req = bp->cp_nr_rings + num;
8015 int max_idx, avail_msix;
8017 max_idx = bp->total_irqs;
8018 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8019 max_idx = min_t(int, bp->total_irqs, max_cp);
8020 avail_msix = max_idx - bp->cp_nr_rings;
8021 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8024 if (max_irq < total_req) {
8025 num = max_irq - bp->cp_nr_rings;
8032 static int bnxt_get_num_msix(struct bnxt *bp)
8034 if (!BNXT_NEW_RM(bp))
8035 return bnxt_get_max_func_irqs(bp);
8037 return bnxt_nq_rings_in_use(bp);
8040 static int bnxt_init_msix(struct bnxt *bp)
8042 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8043 struct msix_entry *msix_ent;
8045 total_vecs = bnxt_get_num_msix(bp);
8046 max = bnxt_get_max_func_irqs(bp);
8047 if (total_vecs > max)
8053 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8057 for (i = 0; i < total_vecs; i++) {
8058 msix_ent[i].entry = i;
8059 msix_ent[i].vector = 0;
8062 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8065 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8066 ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (total_vecs < 0 || total_vecs < ulp_msix) {
		rc = -ENODEV;
		goto msix_setup_exit;
	}
8072 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8074 for (i = 0; i < total_vecs; i++)
8075 bp->irq_tbl[i].vector = msix_ent[i].vector;
8077 bp->total_irqs = total_vecs;
	/* Trim rings based on the number of vectors allocated */
8079 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8080 total_vecs - ulp_msix, min == 1);
	if (rc)
		goto msix_setup_exit;
8084 bp->cp_nr_rings = (min == 1) ?
8085 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8086 bp->tx_nr_rings + bp->rx_nr_rings;
8090 goto msix_setup_exit;
8092 bp->flags |= BNXT_FLAG_USING_MSIX;
8097 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8100 pci_disable_msix(bp->pdev);
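/*
 * Illustrative sketch, not part of the upstream driver: the cp_nr_rings
 * computation in bnxt_init_msix() above reduces to this rule: with shared
 * rings (min == 1) one completion ring serves an RX/TX pair, so max(rx, tx)
 * rings suffice; otherwise each ring needs its own, rx + tx in total:
 */
static inline int bnxt_example_cp_rings(int rx, int tx, bool shared)
{
	return shared ? max(rx, tx) : rx + tx;
}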
8105 static int bnxt_init_inta(struct bnxt *bp)
8107 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8112 bp->rx_nr_rings = 1;
8113 bp->tx_nr_rings = 1;
8114 bp->cp_nr_rings = 1;
8115 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8116 bp->irq_tbl[0].vector = bp->pdev->irq;
8120 static int bnxt_init_int_mode(struct bnxt *bp)
8124 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8125 rc = bnxt_init_msix(bp);
8127 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8128 /* fallback to INTA */
8129 rc = bnxt_init_inta(bp);
8134 static void bnxt_clear_int_mode(struct bnxt *bp)
8136 if (bp->flags & BNXT_FLAG_USING_MSIX)
8137 pci_disable_msix(bp->pdev);
8141 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8144 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8146 int tcs = netdev_get_num_tc(bp->dev);
8147 bool irq_cleared = false;
8150 if (!bnxt_need_reserve_rings(bp))
8153 if (irq_re_init && BNXT_NEW_RM(bp) &&
8154 bnxt_get_num_msix(bp) != bp->total_irqs) {
8155 bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		irq_cleared = true;
	}
	rc = __bnxt_reserve_rings(bp);
	if (irq_cleared) {
		if (!rc)
			rc = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, rc);
	}
	if (rc) {
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
		return rc;
	}
8169 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8170 netdev_err(bp->dev, "tx ring reservation failure\n");
8171 netdev_reset_tc(bp->dev);
8172 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8178 static void bnxt_free_irq(struct bnxt *bp)
8180 struct bnxt_irq *irq;
8183 #ifdef CONFIG_RFS_ACCEL
8184 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
	bp->dev->rx_cpu_rmap = NULL;
#endif
	if (!bp->irq_tbl || !bp->bnapi)
		return;
8190 for (i = 0; i < bp->cp_nr_rings; i++) {
8191 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8193 irq = &bp->irq_tbl[map_idx];
8194 if (irq->requested) {
8195 if (irq->have_cpumask) {
8196 irq_set_affinity_hint(irq->vector, NULL);
8197 free_cpumask_var(irq->cpu_mask);
8198 irq->have_cpumask = 0;
8200 free_irq(irq->vector, bp->bnapi[i]);
8207 static int bnxt_request_irq(struct bnxt *bp)
8210 unsigned long flags = 0;
8211 #ifdef CONFIG_RFS_ACCEL
8212 struct cpu_rmap *rmap;
	rc = bnxt_setup_int_mode(bp);
	if (rc) {
		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
			   rc);
		return rc;
	}
8221 #ifdef CONFIG_RFS_ACCEL
8222 rmap = bp->dev->rx_cpu_rmap;
8224 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8225 flags = IRQF_SHARED;
8227 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8228 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8229 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8231 #ifdef CONFIG_RFS_ACCEL
		if (rmap && bp->bnapi[i]->rx_ring) {
			rc = irq_cpu_rmap_add(rmap, irq->vector);
			if (rc)
				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
					    j);
			j++;
		}
#endif
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 bp->bnapi[i]);
		if (rc)
			break;

		irq->requested = 1;
8247 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8248 int numa_node = dev_to_node(&bp->pdev->dev);
8250 irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
8253 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
				netdev_warn(bp->dev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				break;
8265 static void bnxt_del_napi(struct bnxt *bp)
8272 for (i = 0; i < bp->cp_nr_rings; i++) {
8273 struct bnxt_napi *bnapi = bp->bnapi[i];
8275 napi_hash_del(&bnapi->napi);
8276 netif_napi_del(&bnapi->napi);
	/* We called napi_hash_del() before netif_napi_del(), so we need
	 * to respect an RCU grace period before freeing napi structures.
	 */
	synchronize_net();
8284 static void bnxt_init_napi(struct bnxt *bp)
8287 unsigned int cp_nr_rings = bp->cp_nr_rings;
8288 struct bnxt_napi *bnapi;
8290 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8291 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8293 if (bp->flags & BNXT_FLAG_CHIP_P5)
8294 poll_fn = bnxt_poll_p5;
		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			cp_nr_rings--;
8297 for (i = 0; i < cp_nr_rings; i++) {
8298 bnapi = bp->bnapi[i];
8299 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8301 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8302 bnapi = bp->bnapi[cp_nr_rings];
8303 netif_napi_add(bp->dev, &bnapi->napi,
8304 bnxt_poll_nitroa0, 64);
8307 bnapi = bp->bnapi[0];
8308 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8312 static void bnxt_disable_napi(struct bnxt *bp)
8319 for (i = 0; i < bp->cp_nr_rings; i++) {
8320 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8322 if (bp->bnapi[i]->rx_ring)
8323 cancel_work_sync(&cpr->dim.work);
8325 napi_disable(&bp->bnapi[i]->napi);
8329 static void bnxt_enable_napi(struct bnxt *bp)
8333 for (i = 0; i < bp->cp_nr_rings; i++) {
8334 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8335 bp->bnapi[i]->in_reset = false;
8337 if (bp->bnapi[i]->rx_ring) {
8338 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8339 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8341 napi_enable(&bp->bnapi[i]->napi);
8345 void bnxt_tx_disable(struct bnxt *bp)
8348 struct bnxt_tx_ring_info *txr;
8351 for (i = 0; i < bp->tx_nr_rings; i++) {
8352 txr = &bp->tx_ring[i];
8353 txr->dev_state = BNXT_DEV_STATE_CLOSING;
8356 /* Stop all TX queues */
8357 netif_tx_disable(bp->dev);
8358 netif_carrier_off(bp->dev);
8361 void bnxt_tx_enable(struct bnxt *bp)
8364 struct bnxt_tx_ring_info *txr;
8366 for (i = 0; i < bp->tx_nr_rings; i++) {
		txr = &bp->tx_ring[i];
		txr->dev_state = 0;
8370 netif_tx_wake_all_queues(bp->dev);
8371 if (bp->link_info.link_up)
8372 netif_carrier_on(bp->dev);
8375 static void bnxt_report_link(struct bnxt *bp)
8377 if (bp->link_info.link_up) {
		const char *duplex;
		const char *flow_ctrl;
		u32 speed;
8383 netif_carrier_on(bp->dev);
		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
			duplex = "full";
		else
			duplex = "half";
8388 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8389 flow_ctrl = "ON - receive & transmit";
8390 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8391 flow_ctrl = "ON - transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
			flow_ctrl = "ON - receive";
		else
			flow_ctrl = "none";
8396 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8397 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8398 speed, duplex, flow_ctrl);
8399 if (bp->flags & BNXT_FLAG_EEE_CAP)
8400 netdev_info(bp->dev, "EEE is %s\n",
				    bp->eee.eee_active ? "active" :
							 "not active");
8403 fec = bp->link_info.fec_cfg;
8404 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8405 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8406 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8407 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8408 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8410 netif_carrier_off(bp->dev);
8411 netdev_err(bp->dev, "NIC Link is Down\n");
8415 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8418 struct hwrm_port_phy_qcaps_input req = {0};
8419 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8420 struct bnxt_link_info *link_info = &bp->link_info;
8422 bp->flags &= ~BNXT_FLAG_EEE_CAP;
	if (bp->test_info)
		bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
					  BNXT_TEST_FL_AN_PHY_LPBK);
	if (bp->hwrm_spec_code < 0x10201)
		return 0;
8429 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8431 mutex_lock(&bp->hwrm_cmd_lock);
8432 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8434 goto hwrm_phy_qcaps_exit;
8436 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8437 struct ethtool_eee *eee = &bp->eee;
8438 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8440 bp->flags |= BNXT_FLAG_EEE_CAP;
8441 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8442 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8443 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8444 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8445 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8447 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
		if (bp->test_info)
			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
	}
8451 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
		if (bp->test_info)
			bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
	}
8455 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
8457 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
8459 if (resp->supported_speeds_auto_mode)
8460 link_info->support_auto_speeds =
8461 le16_to_cpu(resp->supported_speeds_auto_mode);
8463 bp->port_count = resp->port_cnt;
8465 hwrm_phy_qcaps_exit:
8466 mutex_unlock(&bp->hwrm_cmd_lock);
8470 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8473 struct bnxt_link_info *link_info = &bp->link_info;
8474 struct hwrm_port_phy_qcfg_input req = {0};
8475 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8476 u8 link_up = link_info->link_up;
8479 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8481 mutex_lock(&bp->hwrm_cmd_lock);
8482 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
8488 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8489 link_info->phy_link_status = resp->link;
8490 link_info->duplex = resp->duplex_cfg;
8491 if (bp->hwrm_spec_code >= 0x10800)
8492 link_info->duplex = resp->duplex_state;
8493 link_info->pause = resp->pause;
8494 link_info->auto_mode = resp->auto_mode;
8495 link_info->auto_pause_setting = resp->auto_pause;
8496 link_info->lp_pause = resp->link_partner_adv_pause;
8497 link_info->force_pause_setting = resp->force_pause;
8498 link_info->duplex_setting = resp->duplex_cfg;
8499 if (link_info->phy_link_status == BNXT_LINK_LINK)
8500 link_info->link_speed = le16_to_cpu(resp->link_speed);
8502 link_info->link_speed = 0;
8503 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8504 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8505 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8506 link_info->lp_auto_link_speeds =
8507 le16_to_cpu(resp->link_partner_adv_speeds);
8508 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8509 link_info->phy_ver[0] = resp->phy_maj;
8510 link_info->phy_ver[1] = resp->phy_min;
8511 link_info->phy_ver[2] = resp->phy_bld;
8512 link_info->media_type = resp->media_type;
8513 link_info->phy_type = resp->phy_type;
8514 link_info->transceiver = resp->xcvr_pkg_type;
8515 link_info->phy_addr = resp->eee_config_phy_addr &
8516 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8517 link_info->module_status = resp->module_status;
8519 if (bp->flags & BNXT_FLAG_EEE_CAP) {
8520 struct ethtool_eee *eee = &bp->eee;
8523 eee->eee_active = 0;
8524 if (resp->eee_config_phy_addr &
8525 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8526 eee->eee_active = 1;
8527 fw_speeds = le16_to_cpu(
8528 resp->link_partner_adv_eee_link_speed_mask);
8529 eee->lp_advertised =
8530 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8533 /* Pull initial EEE config */
8534 if (!chng_link_state) {
8535 if (resp->eee_config_phy_addr &
8536 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8537 eee->eee_enabled = 1;
			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8543 if (resp->eee_config_phy_addr &
8544 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8547 eee->tx_lpi_enabled = 1;
8548 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8549 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8550 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8555 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8556 if (bp->hwrm_spec_code >= 0x10504)
8557 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8559 /* TODO: need to add more logic to report VF link */
8560 if (chng_link_state) {
8561 if (link_info->phy_link_status == BNXT_LINK_LINK)
8562 link_info->link_up = 1;
8564 link_info->link_up = 0;
8565 if (link_up != link_info->link_up)
8566 bnxt_report_link(bp);
		/* always link down if not required to update link state */
8569 link_info->link_up = 0;
8571 mutex_unlock(&bp->hwrm_cmd_lock);
8573 if (!BNXT_PHY_CFG_ABLE(bp))
8576 diff = link_info->support_auto_speeds ^ link_info->advertising;
8577 if ((link_info->support_auto_speeds | diff) !=
8578 link_info->support_auto_speeds) {
8579 /* An advertised speed is no longer supported, so we need to
8580 * update the advertisement settings. Caller holds RTNL
8581 * so we can modify link settings.
8583 link_info->advertising = link_info->support_auto_speeds;
8584 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8585 bnxt_hwrm_set_link_setting(bp, true, false);
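/*
 * Illustrative sketch, not part of the upstream driver: the
 * "(support | diff) != support" test above is a bitwise way of asking
 * whether 'advertising' contains any speed outside 'support':
 */
static inline bool bnxt_example_adv_unsupported(u16 support, u16 advertising)
{
	return advertising & ~support;
}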
8590 static void bnxt_get_port_module_status(struct bnxt *bp)
8592 struct bnxt_link_info *link_info = &bp->link_info;
8593 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	if (bnxt_update_link(bp, true))
		return;
8599 module_status = link_info->module_status;
8600 switch (module_status) {
8601 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8602 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8603 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
8607 netdev_warn(bp->dev, "Module part number %s\n",
8608 resp->phy_vendor_partnumber);
8610 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8611 netdev_warn(bp->dev, "TX is disabled\n");
8612 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8613 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8618 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8620 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
		if (bp->hwrm_spec_code >= 0x10201)
			req->auto_pause =
					PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8624 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8625 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8626 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8627 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8631 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8632 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8633 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8634 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8637 if (bp->hwrm_spec_code >= 0x10201) {
8638 req->auto_pause = req->force_pause;
8639 req->enables |= cpu_to_le32(
8640 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8645 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8646 struct hwrm_port_phy_cfg_input *req)
8648 u8 autoneg = bp->link_info.autoneg;
8649 u16 fw_link_speed = bp->link_info.req_link_speed;
8650 u16 advertising = bp->link_info.advertising;
	if (autoneg & BNXT_AUTONEG_SPEED) {
		req->auto_mode |=
			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8656 req->enables |= cpu_to_le32(
8657 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8658 req->auto_link_speed_mask = cpu_to_le16(advertising);
8660 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
		req->flags |=
			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
	} else {
8664 req->force_link_speed = cpu_to_le16(fw_link_speed);
8665 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8668 /* tell chimp that the setting takes effect immediately */
8669 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8672 int bnxt_hwrm_set_pause(struct bnxt *bp)
8674 struct hwrm_port_phy_cfg_input req = {0};
8677 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8678 bnxt_hwrm_set_pause_common(bp, &req);
8680 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8681 bp->link_info.force_link_chng)
8682 bnxt_hwrm_set_link_common(bp, &req);
8684 mutex_lock(&bp->hwrm_cmd_lock);
8685 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8686 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
		/* since changing the pause setting doesn't trigger any link
		 * change event, the driver needs to update the current pause
		 * result upon the successful return of the phy_cfg command
		 */
8691 bp->link_info.pause =
8692 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8693 bp->link_info.auto_pause_setting = 0;
8694 if (!bp->link_info.force_link_chng)
8695 bnxt_report_link(bp);
8697 bp->link_info.force_link_chng = false;
8698 mutex_unlock(&bp->hwrm_cmd_lock);
8702 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8703 struct hwrm_port_phy_cfg_input *req)
8705 struct ethtool_eee *eee = &bp->eee;
8707 if (eee->eee_enabled) {
8709 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8711 if (eee->tx_lpi_enabled)
8712 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8714 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8716 req->flags |= cpu_to_le32(flags);
8717 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8718 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8719 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8721 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8725 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8727 struct hwrm_port_phy_cfg_input req = {0};
8729 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8731 bnxt_hwrm_set_pause_common(bp, &req);
8733 bnxt_hwrm_set_link_common(bp, &req);
8736 bnxt_hwrm_set_eee(bp, &req);
8737 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8740 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8742 struct hwrm_port_phy_cfg_input req = {0};
	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if (pci_num_vf(bp->pdev))
		return 0;
8750 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8751 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8752 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8755 static int bnxt_fw_init_one(struct bnxt *bp);
8757 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8759 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8760 struct hwrm_func_drv_if_change_input req = {0};
8761 bool resc_reinit = false, fw_reset = false;
	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;
8768 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
	if (up)
		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8771 mutex_lock(&bp->hwrm_cmd_lock);
8772 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		flags = le32_to_cpu(resp->flags);
8775 mutex_unlock(&bp->hwrm_cmd_lock);
8782 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8784 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8787 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
		return -ENODEV;
	}
8791 if (resc_reinit || fw_reset) {
8793 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
8795 bnxt_free_ctx_mem(bp);
8798 rc = bnxt_fw_init_one(bp);
8800 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8803 bnxt_clear_int_mode(bp);
8804 rc = bnxt_init_int_mode(bp);
8806 netdev_err(bp->dev, "init int mode failed\n");
8809 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8811 if (BNXT_NEW_RM(bp)) {
8812 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8814 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8815 hw_resc->resv_cp_rings = 0;
8816 hw_resc->resv_stat_ctxs = 0;
8817 hw_resc->resv_irqs = 0;
8818 hw_resc->resv_tx_rings = 0;
8819 hw_resc->resv_rx_rings = 0;
8820 hw_resc->resv_hw_ring_grps = 0;
8821 hw_resc->resv_vnics = 0;
8823 bp->tx_nr_rings = 0;
8824 bp->rx_nr_rings = 0;
8831 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8833 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8834 struct hwrm_port_led_qcaps_input req = {0};
8835 struct bnxt_pf_info *pf = &bp->pf;
	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
		return 0;
8842 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8843 req.port_id = cpu_to_le16(pf->port_id);
8844 mutex_lock(&bp->hwrm_cmd_lock);
8845 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
8850 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8853 bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
		       bp->num_leds);
8856 for (i = 0; i < bp->num_leds; i++) {
8857 struct bnxt_led_info *led = &bp->leds[i];
8858 __le16 caps = led->led_state_caps;
8860 if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
8867 mutex_unlock(&bp->hwrm_cmd_lock);
8871 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8873 struct hwrm_wol_filter_alloc_input req = {0};
8874 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8877 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8878 req.port_id = cpu_to_le16(bp->pf.port_id);
8879 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8880 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8881 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8882 mutex_lock(&bp->hwrm_cmd_lock);
8883 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->wol_filter_id = resp->wol_filter_id;
8886 mutex_unlock(&bp->hwrm_cmd_lock);
8890 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8892 struct hwrm_wol_filter_free_input req = {0};
8895 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8896 req.port_id = cpu_to_le16(bp->pf.port_id);
8897 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8898 req.wol_filter_id = bp->wol_filter_id;
8899 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8903 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8905 struct hwrm_wol_filter_qcfg_input req = {0};
8906 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8907 u16 next_handle = 0;
8910 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8911 req.port_id = cpu_to_le16(bp->pf.port_id);
8912 req.handle = cpu_to_le16(handle);
8913 mutex_lock(&bp->hwrm_cmd_lock);
8914 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8916 next_handle = le16_to_cpu(resp->next_handle);
8917 if (next_handle != 0) {
8918 if (resp->wol_type ==
8919 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8921 bp->wol_filter_id = resp->wol_filter_id;
8925 mutex_unlock(&bp->hwrm_cmd_lock);
8929 static void bnxt_get_wol_settings(struct bnxt *bp)
8934 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8938 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8939 } while (handle && handle != 0xffff);
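/* Each query returns the next filter handle in the list; a next handle of
 * 0 or 0xffff from the firmware terminates the walk above.
 */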
8942 #ifdef CONFIG_BNXT_HWMON
8943 static ssize_t bnxt_show_temp(struct device *dev,
8944 struct device_attribute *devattr, char *buf)
8946 struct hwrm_temp_monitor_query_input req = {0};
8947 struct hwrm_temp_monitor_query_output *resp;
8948 struct bnxt *bp = dev_get_drvdata(dev);
8951 resp = bp->hwrm_cmd_resp_addr;
8952 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8953 mutex_lock(&bp->hwrm_cmd_lock);
8954 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8955 temp = resp->temp * 1000; /* display millidegree */
8956 mutex_unlock(&bp->hwrm_cmd_lock);
8958 return sprintf(buf, "%u\n", temp);
8960 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8962 static struct attribute *bnxt_attrs[] = {
8963 &sensor_dev_attr_temp1_input.dev_attr.attr,
8966 ATTRIBUTE_GROUPS(bnxt);
8968 static void bnxt_hwmon_close(struct bnxt *bp)
8970 if (bp->hwmon_dev) {
8971 hwmon_device_unregister(bp->hwmon_dev);
8972 bp->hwmon_dev = NULL;
8976 static void bnxt_hwmon_open(struct bnxt *bp)
8978 struct pci_dev *pdev = bp->pdev;
8983 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8984 DRV_MODULE_NAME, bp,
8986 if (IS_ERR(bp->hwmon_dev)) {
8987 bp->hwmon_dev = NULL;
8988 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8992 static void bnxt_hwmon_close(struct bnxt *bp)
8996 static void bnxt_hwmon_open(struct bnxt *bp)
9001 static bool bnxt_eee_config_ok(struct bnxt *bp)
9003 struct ethtool_eee *eee = &bp->eee;
9004 struct bnxt_link_info *link_info = &bp->link_info;
9006 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9009 if (eee->eee_enabled) {
9011 u32 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9013 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9014 eee->eee_enabled = 0;
9017 if (eee->advertised & ~advertising) {
9018 eee->advertised = advertising & eee->supported;
9025 static int bnxt_update_phy_setting(struct bnxt *bp)
9028 bool update_link = false;
9029 bool update_pause = false;
9030 bool update_eee = false;
9031 struct bnxt_link_info *link_info = &bp->link_info;
9033 rc = bnxt_update_link(bp, true);
9035 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9039 if (!BNXT_SINGLE_PF(bp))
9042 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9043 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9044 link_info->req_flow_ctrl)
9045 update_pause = true;
9046 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9047 link_info->force_pause_setting != link_info->req_flow_ctrl)
9048 update_pause = true;
9049 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9050 if (BNXT_AUTO_MODE(link_info->auto_mode))
9052 if (link_info->req_link_speed != link_info->force_link_speed)
9054 if (link_info->req_duplex != link_info->duplex_setting)
9057 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9059 if (link_info->advertising != link_info->auto_link_speeds)
9063 /* The last close may have shut down the link, so we need to call
9064 * PHY_CFG to bring it back up.
9066 if (!netif_carrier_ok(bp->dev))
9069 if (!bnxt_eee_config_ok(bp))
9073 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9074 else if (update_pause)
9075 rc = bnxt_hwrm_set_pause(bp);
9077 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9085 /* Common routine to pre-map certain register blocks to different GRC windows.
9086 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9087 * in the PF and 3 windows in the VF can be customized to map different register blocks. */
9090 static void bnxt_preset_reg_win(struct bnxt *bp)
9093 /* CAG registers map to GRC window #4 */
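/* The per-window select registers are 32 bits each, so window #4 is
 * programmed at byte offset (4 - 1) * 4 = 12 from the window base.
 */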
9094 writel(BNXT_CAG_REG_BASE,
9095 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9099 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9101 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9105 bnxt_preset_reg_win(bp);
9106 netif_carrier_off(bp->dev);
9108 /* Reserve rings now if none were reserved at driver probe. */
9109 rc = bnxt_init_dflt_ring_mode(bp);
9111 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9115 rc = bnxt_reserve_rings(bp, irq_re_init);
9118 if ((bp->flags & BNXT_FLAG_RFS) &&
9119 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9120 /* disable RFS if falling back to INTA */
9121 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9122 bp->flags &= ~BNXT_FLAG_RFS;
9125 rc = bnxt_alloc_mem(bp, irq_re_init);
9127 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9128 goto open_err_free_mem;
9133 rc = bnxt_request_irq(bp);
9135 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9140 bnxt_enable_napi(bp);
9141 bnxt_debug_dev_init(bp);
9143 rc = bnxt_init_nic(bp, irq_re_init);
9145 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9150 mutex_lock(&bp->link_lock);
9151 rc = bnxt_update_phy_setting(bp);
9152 mutex_unlock(&bp->link_lock);
9154 netdev_warn(bp->dev, "failed to update phy settings\n");
9155 if (BNXT_SINGLE_PF(bp)) {
9156 bp->link_info.phy_retry = true;
9157 bp->link_info.phy_retry_expires =
9164 udp_tunnel_get_rx_info(bp->dev);
9166 set_bit(BNXT_STATE_OPEN, &bp->state);
9167 bnxt_enable_int(bp);
9168 /* Enable TX queues */
9170 mod_timer(&bp->timer, jiffies + bp->current_interval);
9171 /* Poll the link status and check the SFP+ module status */
9172 bnxt_get_port_module_status(bp);
9174 /* VF-reps may need to be re-opened after the PF is re-opened */
9176 bnxt_vf_reps_open(bp);
9180 bnxt_debug_dev_exit(bp);
9181 bnxt_disable_napi(bp);
9189 bnxt_free_mem(bp, true);
9193 /* rtnl_lock held */
9194 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9198 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9200 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9206 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9207 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline self tests. */
9210 int bnxt_half_open_nic(struct bnxt *bp)
9214 rc = bnxt_alloc_mem(bp, false);
9216 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9219 rc = bnxt_init_nic(bp, false);
9221 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9228 bnxt_free_mem(bp, false);
9233 /* rtnl_lock held, this call can only be made after a previous successful
9234 * call to bnxt_half_open_nic().
9236 void bnxt_half_close_nic(struct bnxt *bp)
9238 bnxt_hwrm_resource_free(bp, false, false);
9240 bnxt_free_mem(bp, false);
9243 static int bnxt_open(struct net_device *dev)
9245 struct bnxt *bp = netdev_priv(dev);
9248 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9249 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9253 rc = bnxt_hwrm_if_change(bp, true);
9256 rc = __bnxt_open_nic(bp, true, true);
9258 bnxt_hwrm_if_change(bp, false);
9260 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9262 struct bnxt_pf_info *pf = &bp->pf;
9263 int n = pf->active_vfs;
9266 bnxt_cfg_hw_sriov(bp, &n, true);
9268 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9269 bnxt_ulp_start(bp, 0);
9271 bnxt_hwmon_open(bp);
9277 static bool bnxt_drv_busy(struct bnxt *bp)
9279 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9280 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9283 static void bnxt_get_ring_stats(struct bnxt *bp,
9284 struct rtnl_link_stats64 *stats);
9286 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9289 /* Close the VF-reps before closing PF */
9291 bnxt_vf_reps_close(bp);
9293 /* Change device state to avoid TX queue wake-ups */
9294 bnxt_tx_disable(bp);
9296 clear_bit(BNXT_STATE_OPEN, &bp->state);
9297 smp_mb__after_atomic();
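/* The barrier above pairs with the one in bnxt_get_stats64(): once the
 * OPEN bit is seen clear, no new stats readers can start, so waiting for
 * bnxt_drv_busy() to clear below is sufficient.
 */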
9298 while (bnxt_drv_busy(bp))
9301 /* Flush rings and disable interrupts */
9302 bnxt_shutdown_nic(bp, irq_re_init);
9304 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9306 bnxt_debug_dev_exit(bp);
9307 bnxt_disable_napi(bp);
9308 del_timer_sync(&bp->timer);
9309 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
9310 pci_is_enabled(bp->pdev))
9311 pci_disable_device(bp->pdev);
9315 /* Save ring stats before shutdown */
9317 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9322 bnxt_free_mem(bp, irq_re_init);
9325 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9329 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9330 /* If we get here, it means firmware reset is in progress
9331 * while we are trying to close. We can safely proceed with
9332 * the close because we are holding rtnl_lock(). Some firmware
9333 * messages may fail as we proceed to close. We set the
9334 * ABORT_ERR flag here so that the FW reset thread will later
9335 * abort when it gets the rtnl_lock() and sees the flag.
9337 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9338 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9341 #ifdef CONFIG_BNXT_SRIOV
9342 if (bp->sriov_cfg) {
9343 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9345 BNXT_SRIOV_CFG_WAIT_TMO);
9347 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9350 __bnxt_close_nic(bp, irq_re_init, link_re_init);
9354 static int bnxt_close(struct net_device *dev)
9356 struct bnxt *bp = netdev_priv(dev);
9358 bnxt_hwmon_close(bp);
9359 bnxt_close_nic(bp, true, true);
9360 bnxt_hwrm_shutdown_link(bp);
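/* Tell the firmware that the driver interface is going down only after
 * the datapath and link have been shut down.
 */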
9361 bnxt_hwrm_if_change(bp, false);
9365 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9368 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9369 struct hwrm_port_phy_mdio_read_input req = {0};
9372 if (bp->hwrm_spec_code < 0x10a00)
9375 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9376 req.port_id = cpu_to_le16(bp->pf.port_id);
9377 req.phy_addr = phy_addr;
9378 req.reg_addr = cpu_to_le16(reg & 0x1f);
9379 if (mdio_phy_id_is_c45(phy_addr)) {
9381 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9382 req.dev_addr = mdio_phy_id_devad(phy_addr);
9383 req.reg_addr = cpu_to_le16(reg);
9386 mutex_lock(&bp->hwrm_cmd_lock);
9387 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9389 *val = le16_to_cpu(resp->reg_data);
9390 mutex_unlock(&bp->hwrm_cmd_lock);
9394 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9397 struct hwrm_port_phy_mdio_write_input req = {0};
9399 if (bp->hwrm_spec_code < 0x10a00)
9402 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9403 req.port_id = cpu_to_le16(bp->pf.port_id);
9404 req.phy_addr = phy_addr;
9405 req.reg_addr = cpu_to_le16(reg & 0x1f);
9406 if (mdio_phy_id_is_c45(phy_addr)) {
9408 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9409 req.dev_addr = mdio_phy_id_devad(phy_addr);
9410 req.reg_addr = cpu_to_le16(reg);
9412 req.reg_data = cpu_to_le16(val);
9414 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9417 /* rtnl_lock held */
9418 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9420 struct mii_ioctl_data *mdio = if_mii(ifr);
9421 struct bnxt *bp = netdev_priv(dev);
9426 mdio->phy_id = bp->link_info.phy_addr;
9432 if (!netif_running(dev))
9435 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9437 mdio->val_out = mii_regval;
9442 if (!netif_running(dev))
9445 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9455 static void bnxt_get_ring_stats(struct bnxt *bp,
9456 struct rtnl_link_stats64 *stats)
9461 for (i = 0; i < bp->cp_nr_rings; i++) {
9462 struct bnxt_napi *bnapi = bp->bnapi[i];
9463 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9464 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9466 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9467 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9468 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9470 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9471 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9472 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9474 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9475 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9476 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9478 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9479 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9480 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9482 stats->rx_missed_errors +=
9483 le64_to_cpu(hw_stats->rx_discard_pkts);
9485 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9487 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9491 static void bnxt_add_prev_stats(struct bnxt *bp,
9492 struct rtnl_link_stats64 *stats)
9494 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9496 stats->rx_packets += prev_stats->rx_packets;
9497 stats->tx_packets += prev_stats->tx_packets;
9498 stats->rx_bytes += prev_stats->rx_bytes;
9499 stats->tx_bytes += prev_stats->tx_bytes;
9500 stats->rx_missed_errors += prev_stats->rx_missed_errors;
9501 stats->multicast += prev_stats->multicast;
9502 stats->tx_dropped += prev_stats->tx_dropped;
9506 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9508 struct bnxt *bp = netdev_priv(dev);
9510 set_bit(BNXT_STATE_READ_STATS, &bp->state);
9511 /* Make sure bnxt_close_nic() sees that we are reading stats before
9512 * we check the BNXT_STATE_OPEN flag.
9514 smp_mb__after_atomic();
9515 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9516 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9517 *stats = bp->net_stats_prev;
9521 bnxt_get_ring_stats(bp, stats);
9522 bnxt_add_prev_stats(bp, stats);
9524 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9525 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9526 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9528 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9529 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9530 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9531 le64_to_cpu(rx->rx_ovrsz_frames) +
9532 le64_to_cpu(rx->rx_runt_frames);
9533 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9534 le64_to_cpu(rx->rx_jbr_frames);
9535 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9536 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9537 stats->tx_errors = le64_to_cpu(tx->tx_err);
9539 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9542 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9544 struct net_device *dev = bp->dev;
9545 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9546 struct netdev_hw_addr *ha;
9549 bool update = false;
9552 netdev_for_each_mc_addr(ha, dev) {
9553 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9554 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9555 vnic->mc_list_count = 0;
9559 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9560 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9567 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9569 if (mc_count != vnic->mc_list_count) {
9570 vnic->mc_list_count = mc_count;
9576 static bool bnxt_uc_list_updated(struct bnxt *bp)
9578 struct net_device *dev = bp->dev;
9579 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9580 struct netdev_hw_addr *ha;
9583 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9586 netdev_for_each_uc_addr(ha, dev) {
9587 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9595 static void bnxt_set_rx_mode(struct net_device *dev)
9597 struct bnxt *bp = netdev_priv(dev);
9598 struct bnxt_vnic_info *vnic;
9599 bool mc_update = false;
9603 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9606 vnic = &bp->vnic_info[0];
9607 mask = vnic->rx_mask;
9608 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9609 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9610 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9611 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9613 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9614 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9616 uc_update = bnxt_uc_list_updated(bp);
9618 if (dev->flags & IFF_BROADCAST)
9619 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9620 if (dev->flags & IFF_ALLMULTI) {
9621 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9622 vnic->mc_list_count = 0;
9624 mc_update = bnxt_mc_list_updated(bp, &mask);
9627 if (mask != vnic->rx_mask || uc_update || mc_update) {
9628 vnic->rx_mask = mask;
9630 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9631 bnxt_queue_sp_work(bp);
9635 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9637 struct net_device *dev = bp->dev;
9638 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9639 struct netdev_hw_addr *ha;
9643 netif_addr_lock_bh(dev);
9644 uc_update = bnxt_uc_list_updated(bp);
9645 netif_addr_unlock_bh(dev);
9650 mutex_lock(&bp->hwrm_cmd_lock);
9651 for (i = 1; i < vnic->uc_filter_count; i++) {
9652 struct hwrm_cfa_l2_filter_free_input req = {0};
9654 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9657 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9659 rc = _hwrm_send_message(bp, &req, sizeof(req),
9662 mutex_unlock(&bp->hwrm_cmd_lock);
9664 vnic->uc_filter_count = 1;
9666 netif_addr_lock_bh(dev);
9667 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9668 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9670 netdev_for_each_uc_addr(ha, dev) {
9671 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9673 vnic->uc_filter_count++;
9676 netif_addr_unlock_bh(dev);
9678 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9679 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9681 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9683 vnic->uc_filter_count = i;
9689 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9690 if (rc && vnic->mc_list_count) {
9691 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9693 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9694 vnic->mc_list_count = 0;
9695 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9698 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
9704 static bool bnxt_can_reserve_rings(struct bnxt *bp)
9706 #ifdef CONFIG_BNXT_SRIOV
9707 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
9708 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9710 /* No minimum rings were provisioned by the PF. Don't
9711 * reserve rings by default when device is down.
9713 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9716 if (!netif_running(bp->dev))
9723 /* If the chip and firmware support RFS */
9724 static bool bnxt_rfs_supported(struct bnxt *bp)
9726 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9727 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
9731 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9733 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9738 /* If runtime conditions support RFS */
9739 static bool bnxt_rfs_capable(struct bnxt *bp)
9741 #ifdef CONFIG_RFS_ACCEL
9742 int vnics, max_vnics, max_rss_ctxs;
9744 if (bp->flags & BNXT_FLAG_CHIP_P5)
9745 return bnxt_rfs_supported(bp);
9746 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9749 vnics = 1 + bp->rx_nr_rings;
9750 max_vnics = bnxt_get_max_func_vnics(bp);
9751 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9753 /* RSS contexts not a limiting factor */
9754 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9755 max_rss_ctxs = max_vnics;
9756 if (vnics > max_vnics || vnics > max_rss_ctxs) {
9757 if (bp->rx_nr_rings > 1)
9758 netdev_warn(bp->dev,
9759 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9760 min(max_rss_ctxs - 1, max_vnics - 1));
9764 if (!BNXT_NEW_RM(bp))
9767 if (vnics == bp->hw_resc.resv_vnics)
9770 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9771 if (vnics <= bp->hw_resc.resv_vnics)
9774 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9775 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9782 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9783 netdev_features_t features)
9785 struct bnxt *bp = netdev_priv(dev);
9787 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9788 features &= ~NETIF_F_NTUPLE;
9790 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9791 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9793 if (!(features & NETIF_F_GRO))
9794 features &= ~NETIF_F_GRO_HW;
9796 if (features & NETIF_F_GRO_HW)
9797 features &= ~NETIF_F_LRO;
9799 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9800 * turned on or off together.
9802 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9803 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9804 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9805 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9806 NETIF_F_HW_VLAN_STAG_RX);
9808 features |= NETIF_F_HW_VLAN_CTAG_RX |
9809 NETIF_F_HW_VLAN_STAG_RX;
9811 #ifdef CONFIG_BNXT_SRIOV
9814 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9815 NETIF_F_HW_VLAN_STAG_RX);
9822 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9824 struct bnxt *bp = netdev_priv(dev);
9825 u32 flags = bp->flags;
9828 bool re_init = false;
9829 bool update_tpa = false;
9831 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9832 if (features & NETIF_F_GRO_HW)
9833 flags |= BNXT_FLAG_GRO;
9834 else if (features & NETIF_F_LRO)
9835 flags |= BNXT_FLAG_LRO;
9837 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9838 flags &= ~BNXT_FLAG_TPA;
9840 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9841 flags |= BNXT_FLAG_STRIP_VLAN;
9843 if (features & NETIF_F_NTUPLE)
9844 flags |= BNXT_FLAG_RFS;
9846 changes = flags ^ bp->flags;
9847 if (changes & BNXT_FLAG_TPA) {
9849 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9850 (flags & BNXT_FLAG_TPA) == 0 ||
9851 (bp->flags & BNXT_FLAG_CHIP_P5))
9855 if (changes & ~BNXT_FLAG_TPA)
9858 if (flags != bp->flags) {
9859 u32 old_flags = bp->flags;
9861 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9864 bnxt_set_ring_params(bp);
9869 bnxt_close_nic(bp, false, false);
9872 bnxt_set_ring_params(bp);
9874 return bnxt_open_nic(bp, false, false);
9878 rc = bnxt_set_tpa(bp,
9879 (flags & BNXT_FLAG_TPA) ?
9882 bp->flags = old_flags;
9888 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9889 u32 ring_id, u32 *prod, u32 *cons)
9891 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9892 struct hwrm_dbg_ring_info_get_input req = {0};
9895 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9896 req.ring_type = ring_type;
9897 req.fw_ring_id = cpu_to_le32(ring_id);
9898 mutex_lock(&bp->hwrm_cmd_lock);
9899 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9901 *prod = le32_to_cpu(resp->producer_index);
9902 *cons = le32_to_cpu(resp->consumer_index);
9904 mutex_unlock(&bp->hwrm_cmd_lock);
9908 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9910 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9911 int i = bnapi->index;
9916 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9917 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, txr->tx_cons);
9921 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9923 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9924 int i = bnapi->index;
9929 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9930 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9931 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9932 rxr->rx_sw_agg_prod);
9935 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9937 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9938 int i = bnapi->index;
9940 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9941 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9944 static void bnxt_dbg_dump_states(struct bnxt *bp)
9947 struct bnxt_napi *bnapi;
9949 for (i = 0; i < bp->cp_nr_rings; i++) {
9950 bnapi = bp->bnapi[i];
9951 if (netif_msg_drv(bp)) {
9952 bnxt_dump_tx_sw_state(bnapi);
9953 bnxt_dump_rx_sw_state(bnapi);
9954 bnxt_dump_cp_sw_state(bnapi);
9959 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9962 bnxt_dbg_dump_states(bp);
9963 if (netif_running(bp->dev)) {
9967 bnxt_close_nic(bp, false, false);
9968 bnxt_open_nic(bp, false, false);
9971 bnxt_close_nic(bp, true, false);
9972 rc = bnxt_open_nic(bp, true, false);
9973 bnxt_ulp_start(bp, rc);
9978 static void bnxt_tx_timeout(struct net_device *dev)
9980 struct bnxt *bp = netdev_priv(dev);
9982 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9983 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9984 bnxt_queue_sp_work(bp);
9987 static void bnxt_fw_health_check(struct bnxt *bp)
9989 struct bnxt_fw_health *fw_health = bp->fw_health;
9992 if (!fw_health || !fw_health->enabled ||
9993 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9996 if (fw_health->tmr_counter) {
9997 fw_health->tmr_counter--;
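/* Not due for a health check yet; tmr_counter rate-limits the heartbeat
 * register reads below to once every tmr_multiplier timer ticks.
 */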
10001 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10002 if (val == fw_health->last_fw_heartbeat)
10005 fw_health->last_fw_heartbeat = val;
10007 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10008 if (val != fw_health->last_fw_reset_cnt)
10011 fw_health->tmr_counter = fw_health->tmr_multiplier;
10015 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10016 bnxt_queue_sp_work(bp);
10019 static void bnxt_timer(struct timer_list *t)
10021 struct bnxt *bp = from_timer(bp, t, timer);
10022 struct net_device *dev = bp->dev;
10024 if (!netif_running(dev))
10027 if (atomic_read(&bp->intr_sem) != 0)
10028 goto bnxt_restart_timer;
10030 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10031 bnxt_fw_health_check(bp);
10033 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
10034 bp->stats_coal_ticks) {
10035 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10036 bnxt_queue_sp_work(bp);
10039 if (bnxt_tc_flower_enabled(bp)) {
10040 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10041 bnxt_queue_sp_work(bp);
10044 if (bp->link_info.phy_retry) {
10045 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10046 bp->link_info.phy_retry = false;
10047 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10049 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10050 bnxt_queue_sp_work(bp);
10054 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
10055 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10056 bnxt_queue_sp_work(bp);
10058 bnxt_restart_timer:
10059 mod_timer(&bp->timer, jiffies + bp->current_interval);
10062 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10064 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10065 * set. If the device is being closed, bnxt_close() may be holding the
10066 * rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10067 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
10069 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10073 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10075 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10079 /* Only called from bnxt_sp_task() */
10080 static void bnxt_reset(struct bnxt *bp, bool silent)
10082 bnxt_rtnl_lock_sp(bp);
10083 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10084 bnxt_reset_task(bp, silent);
10085 bnxt_rtnl_unlock_sp(bp);
10088 static void bnxt_fw_reset_close(struct bnxt *bp)
10091 __bnxt_close_nic(bp, true, false);
10092 bnxt_clear_int_mode(bp);
10093 bnxt_hwrm_func_drv_unrgtr(bp);
10094 bnxt_free_ctx_mem(bp);
10099 static bool is_bnxt_fw_ok(struct bnxt *bp)
10101 struct bnxt_fw_health *fw_health = bp->fw_health;
10102 bool no_heartbeat = false, has_reset = false;
10105 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10106 if (val == fw_health->last_fw_heartbeat)
10107 no_heartbeat = true;
10109 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10110 if (val != fw_health->last_fw_reset_cnt)
10113 if (!no_heartbeat && has_reset)
10119 /* rtnl_lock is acquired before calling this function */
10120 static void bnxt_force_fw_reset(struct bnxt *bp)
10122 struct bnxt_fw_health *fw_health = bp->fw_health;
10125 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10126 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10129 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10130 bnxt_fw_reset_close(bp);
10131 wait_dsecs = fw_health->master_func_wait_dsecs;
10132 if (fw_health->master) {
10133 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10135 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
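/* Recovery wait times from the firmware are in units of 100 ms (dsecs);
 * multiplying by HZ / 10 converts them to jiffies.
 */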
10137 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10138 wait_dsecs = fw_health->normal_func_wait_dsecs;
10139 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10142 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10143 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10144 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10147 void bnxt_fw_exception(struct bnxt *bp)
10149 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10150 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10151 bnxt_rtnl_lock_sp(bp);
10152 bnxt_force_fw_reset(bp);
10153 bnxt_rtnl_unlock_sp(bp);
10156 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or a negative error code if the func_qcfg query fails. */
10159 static int bnxt_get_registered_vfs(struct bnxt *bp)
10161 #ifdef CONFIG_BNXT_SRIOV
10167 rc = bnxt_hwrm_func_qcfg(bp);
10169 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10172 if (bp->pf.registered_vfs)
10173 return bp->pf.registered_vfs;
10180 void bnxt_fw_reset(struct bnxt *bp)
10182 bnxt_rtnl_lock_sp(bp);
10183 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10184 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10187 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10188 if (bp->pf.active_vfs &&
10189 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10190 n = bnxt_get_registered_vfs(bp);
10192 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10194 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10195 dev_close(bp->dev);
10196 goto fw_reset_exit;
10197 } else if (n > 0) {
10198 u16 vf_tmo_dsecs = n * 10;
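/* i.e. roughly one extra second (10 dsecs) per registered VF for the VF
 * drivers to unregister before the reset proceeds.
 */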
10200 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10201 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10202 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_VF;
10204 bnxt_queue_fw_reset_work(bp, HZ / 10);
10205 goto fw_reset_exit;
10207 bnxt_fw_reset_close(bp);
10208 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10209 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10212 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10213 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10215 bnxt_queue_fw_reset_work(bp, tmo);
10218 bnxt_rtnl_unlock_sp(bp);
10221 static void bnxt_chk_missed_irq(struct bnxt *bp)
10225 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10228 for (i = 0; i < bp->cp_nr_rings; i++) {
10229 struct bnxt_napi *bnapi = bp->bnapi[i];
10230 struct bnxt_cp_ring_info *cpr;
10237 cpr = &bnapi->cp_ring;
10238 for (j = 0; j < 2; j++) {
10239 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10242 if (!cpr2 || cpr2->has_more_work ||
10243 !bnxt_has_work(bp, cpr2))
10246 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10247 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10250 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10251 bnxt_dbg_hwrm_ring_info_get(bp,
10252 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10253 fw_ring_id, &val[0], &val[1]);
10254 cpr->missed_irqs++;
10259 static void bnxt_cfg_ntp_filters(struct bnxt *);
10261 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10263 struct bnxt_link_info *link_info = &bp->link_info;
10265 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10266 link_info->autoneg = BNXT_AUTONEG_SPEED;
10267 if (bp->hwrm_spec_code >= 0x10201) {
10268 if (link_info->auto_pause_setting &
10269 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10270 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10272 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10274 link_info->advertising = link_info->auto_link_speeds;
10276 link_info->req_link_speed = link_info->force_link_speed;
10277 link_info->req_duplex = link_info->duplex_setting;
10279 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10280 link_info->req_flow_ctrl =
10281 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10283 link_info->req_flow_ctrl = link_info->force_pause_setting;
10286 static void bnxt_sp_task(struct work_struct *work)
10288 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10290 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10291 smp_mb__after_atomic();
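/* Pairs with the barrier in __bnxt_close_nic(): either we see the OPEN
 * bit cleared and bail out here, or close sees BNXT_STATE_IN_SP_TASK set
 * and waits for us to finish.
 */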
10292 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10293 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10297 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10298 bnxt_cfg_rx_mode(bp);
10300 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10301 bnxt_cfg_ntp_filters(bp);
10302 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10303 bnxt_hwrm_exec_fwd_req(bp);
10304 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10305 bnxt_hwrm_tunnel_dst_port_alloc(
10306 bp, bp->vxlan_port,
10307 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10309 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10310 bnxt_hwrm_tunnel_dst_port_free(
10311 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10313 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10314 bnxt_hwrm_tunnel_dst_port_alloc(
10316 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10318 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10319 bnxt_hwrm_tunnel_dst_port_free(
10320 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10322 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10323 bnxt_hwrm_port_qstats(bp);
10324 bnxt_hwrm_port_qstats_ext(bp);
10325 bnxt_hwrm_pcie_qstats(bp);
10328 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10331 mutex_lock(&bp->link_lock);
10332 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10334 bnxt_hwrm_phy_qcaps(bp);
10336 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
10338 bnxt_init_ethtool_link_settings(bp);
10340 rc = bnxt_update_link(bp, true);
10341 mutex_unlock(&bp->link_lock);
10343 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10346 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10349 mutex_lock(&bp->link_lock);
10350 rc = bnxt_update_phy_setting(bp);
10351 mutex_unlock(&bp->link_lock);
10353 netdev_warn(bp->dev, "update phy settings retry failed\n");
10355 bp->link_info.phy_retry = false;
10356 netdev_info(bp->dev, "update phy settings retry succeeded\n");
10359 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10360 mutex_lock(&bp->link_lock);
10361 bnxt_get_port_module_status(bp);
10362 mutex_unlock(&bp->link_lock);
10365 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10366 bnxt_tc_flow_stats_work(bp);
10368 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10369 bnxt_chk_missed_irq(bp);
10371 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
10372 * must be the last functions to be called before exiting.
10374 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10375 bnxt_reset(bp, false);
10377 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10378 bnxt_reset(bp, true);
10380 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10381 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10383 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10384 if (!is_bnxt_fw_ok(bp))
10385 bnxt_devlink_health_report(bp,
10386 BNXT_FW_EXCEPTION_SP_EVENT);
10389 smp_mb__before_atomic();
10390 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10393 /* Under rtnl_lock */
10394 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10397 int max_rx, max_tx, tx_sets = 1;
10398 int tx_rings_needed, stats;
10405 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10412 tx_rings_needed = tx * tx_sets + tx_xdp;
10413 if (max_tx < tx_rings_needed)
10417 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10420 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10422 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
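/* When rings are shared (sh), one completion ring services both a TX and
 * an RX ring, hence max(tx_rings_needed, rx); otherwise each ring needs
 * its own completion ring.
 */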
10424 if (BNXT_NEW_RM(bp)) {
10425 cp += bnxt_get_ulp_msix_num(bp);
10426 stats += bnxt_get_ulp_stat_ctxs(bp);
10428 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10432 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10435 pci_iounmap(pdev, bp->bar2);
10440 pci_iounmap(pdev, bp->bar1);
10445 pci_iounmap(pdev, bp->bar0);
10450 static void bnxt_cleanup_pci(struct bnxt *bp)
10452 bnxt_unmap_bars(bp, bp->pdev);
10453 pci_release_regions(bp->pdev);
10454 if (pci_is_enabled(bp->pdev))
10455 pci_disable_device(bp->pdev);
10458 static void bnxt_init_dflt_coal(struct bnxt *bp)
10460 struct bnxt_coal *coal;
10462 /* Tick values in microseconds.
10463 * 1 coal_buf x bufs_per_record = 1 completion record.
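 * For example, with bufs_per_record = 2, an rx coal_bufs of 30 corresponds
 * to roughly 15 completion records.
 */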
10465 coal = &bp->rx_coal;
10466 coal->coal_ticks = 10;
10467 coal->coal_bufs = 30;
10468 coal->coal_ticks_irq = 1;
10469 coal->coal_bufs_irq = 2;
10470 coal->idle_thresh = 50;
10471 coal->bufs_per_record = 2;
10472 coal->budget = 64; /* NAPI budget */
10474 coal = &bp->tx_coal;
10475 coal->coal_ticks = 28;
10476 coal->coal_bufs = 30;
10477 coal->coal_ticks_irq = 2;
10478 coal->coal_bufs_irq = 2;
10479 coal->bufs_per_record = 1;
10481 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10484 static void bnxt_alloc_fw_health(struct bnxt *bp)
10489 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10490 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10493 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10494 if (!bp->fw_health) {
10495 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10496 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10497 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10501 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10506 rc = bnxt_hwrm_ver_get(bp);
10510 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10511 rc = bnxt_alloc_kong_hwrm_resources(bp);
10513 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10516 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10517 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10518 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10522 rc = bnxt_hwrm_func_reset(bp);
10526 bnxt_hwrm_fw_set_time(bp);
10530 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10534 /* Get the MAX capabilities for this function */
10535 rc = bnxt_hwrm_func_qcaps(bp);
10537 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10542 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10544 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10547 bnxt_alloc_fw_health(bp);
10548 rc = bnxt_hwrm_error_recovery_qcfg(bp);
10550 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10553 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
10557 bnxt_hwrm_func_qcfg(bp);
10558 bnxt_hwrm_vnic_qcaps(bp);
10559 bnxt_hwrm_port_led_qcaps(bp);
10560 bnxt_ethtool_init(bp);
10565 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10567 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10568 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10569 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10570 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10571 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10572 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10573 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10574 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10575 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10579 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10581 struct net_device *dev = bp->dev;
10583 dev->hw_features &= ~NETIF_F_NTUPLE;
10584 dev->features &= ~NETIF_F_NTUPLE;
10585 bp->flags &= ~BNXT_FLAG_RFS;
10586 if (bnxt_rfs_supported(bp)) {
10587 dev->hw_features |= NETIF_F_NTUPLE;
10588 if (bnxt_rfs_capable(bp)) {
10589 bp->flags |= BNXT_FLAG_RFS;
10590 dev->features |= NETIF_F_NTUPLE;
10595 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10597 struct pci_dev *pdev = bp->pdev;
10599 bnxt_set_dflt_rss_hash_type(bp);
10600 bnxt_set_dflt_rfs(bp);
10602 bnxt_get_wol_settings(bp);
10603 if (bp->flags & BNXT_FLAG_WOL_CAP)
10604 device_set_wakeup_enable(&pdev->dev, bp->wol);
10606 device_set_wakeup_capable(&pdev->dev, false);
10608 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10609 bnxt_hwrm_coal_params_qcaps(bp);
10612 static int bnxt_fw_init_one(struct bnxt *bp)
10616 rc = bnxt_fw_init_one_p1(bp);
10618 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10621 rc = bnxt_fw_init_one_p2(bp);
10623 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10626 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10629 bnxt_fw_init_one_p3(bp);
10633 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
10635 struct bnxt_fw_health *fw_health = bp->fw_health;
10636 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
10637 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
10638 u32 reg_type, reg_off, delay_msecs;
10640 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
10641 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
10642 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
10643 switch (reg_type) {
10644 case BNXT_FW_HEALTH_REG_TYPE_CFG:
10645 pci_write_config_dword(bp->pdev, reg_off, val);
10647 case BNXT_FW_HEALTH_REG_TYPE_GRC:
10648 writel(reg_off & BNXT_GRC_BASE_MASK,
10649 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
10650 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
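/* The GRC register is now mapped into the second window, which sits at
 * BAR0 offset 0x2000; fall through to the BAR0 write below.
 */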
10652 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
10653 writel(val, bp->bar0 + reg_off);
10655 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
10656 writel(val, bp->bar1 + reg_off);
10660 pci_read_config_dword(bp->pdev, 0, &val);
10661 msleep(delay_msecs);
10665 static void bnxt_reset_all(struct bnxt *bp)
10667 struct bnxt_fw_health *fw_health = bp->fw_health;
10670 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10671 #ifdef CONFIG_TEE_BNXT_FW
10672 rc = tee_bnxt_fw_load();
10674 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
10675 bp->fw_reset_timestamp = jiffies;
10680 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
10681 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
10682 bnxt_fw_reset_writel(bp, i);
10683 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
10684 struct hwrm_fw_reset_input req = {0};
10686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
10687 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
10688 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
10689 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
10690 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
10691 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10693 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
10695 bp->fw_reset_timestamp = jiffies;
10698 static void bnxt_fw_reset_task(struct work_struct *work)
10700 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
10703 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10704 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
10708 switch (bp->fw_reset_state) {
10709 case BNXT_FW_RESET_STATE_POLL_VF: {
10710 int n = bnxt_get_registered_vfs(bp);
10714 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
10715 n, jiffies_to_msecs(jiffies -
10716 bp->fw_reset_timestamp));
10717 goto fw_reset_abort;
10718 } else if (n > 0) {
10719 if (time_after(jiffies, bp->fw_reset_timestamp +
10720 (bp->fw_reset_max_dsecs * HZ / 10))) {
10721 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10722 bp->fw_reset_state = 0;
10723 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
10727 bnxt_queue_fw_reset_work(bp, HZ / 10);
10730 bp->fw_reset_timestamp = jiffies;
10732 bnxt_fw_reset_close(bp);
10733 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10734 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10737 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10738 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10741 bnxt_queue_fw_reset_work(bp, tmo);
10744 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
10747 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
10748 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
10749 !time_after(jiffies, bp->fw_reset_timestamp +
10750 (bp->fw_reset_max_dsecs * HZ / 10))) {
10751 bnxt_queue_fw_reset_work(bp, HZ / 5);
10755 if (!bp->fw_health->master) {
10756 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
10758 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10759 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10762 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10765 case BNXT_FW_RESET_STATE_RESET_FW:
10766 bnxt_reset_all(bp);
10767 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10768 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
10770 case BNXT_FW_RESET_STATE_ENABLE_DEV:
10771 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
10775 val = bnxt_fw_health_readl(bp,
10776 BNXT_FW_RESET_INPROG_REG);
10778 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
10781 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10782 if (pci_enable_device(bp->pdev)) {
10783 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
10784 goto fw_reset_abort;
10786 pci_set_master(bp->pdev);
10787 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
10789 case BNXT_FW_RESET_STATE_POLL_FW:
10790 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
10791 rc = __bnxt_hwrm_ver_get(bp, true);
10793 if (time_after(jiffies, bp->fw_reset_timestamp +
10794 (bp->fw_reset_max_dsecs * HZ / 10))) {
10795 netdev_err(bp->dev, "Firmware reset aborted\n");
10796 goto fw_reset_abort;
10798 bnxt_queue_fw_reset_work(bp, HZ / 5);
10801 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10802 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
10804 case BNXT_FW_RESET_STATE_OPENING:
10805 while (!rtnl_trylock()) {
10806 bnxt_queue_fw_reset_work(bp, HZ / 10);
10809 rc = bnxt_open(bp->dev);
10811 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
10812 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10813 dev_close(bp->dev);
10816 bp->fw_reset_state = 0;
10817 /* Make sure fw_reset_state is 0 before clearing the flag */
10818 smp_mb__before_atomic();
10819 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10820 bnxt_ulp_start(bp, rc);
10821 bnxt_dl_health_status_update(bp, true);
10828 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10829 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
10830 bnxt_dl_health_status_update(bp, false);
10831 bp->fw_reset_state = 0;
10833 dev_close(bp->dev);
10837 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10840 struct bnxt *bp = netdev_priv(dev);
10842 SET_NETDEV_DEV(dev, &pdev->dev);
10844 /* enable device (incl. PCI PM wakeup) and bus-mastering */
10845 rc = pci_enable_device(pdev);
10847 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10851 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10852 dev_err(&pdev->dev,
10853 "Cannot find PCI device base address, aborting\n");
10855 goto init_err_disable;
10858 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10860 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10861 goto init_err_disable;
10864 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10865 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10866 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
10867 goto init_err_disable;
10870 pci_set_master(pdev);
10875 bp->bar0 = pci_ioremap_bar(pdev, 0);
10877 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10879 goto init_err_release;
10882 bp->bar1 = pci_ioremap_bar(pdev, 2);
10884 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10886 goto init_err_release;
10889 bp->bar2 = pci_ioremap_bar(pdev, 4);
10891 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10893 goto init_err_release;
10896 pci_enable_pcie_error_reporting(pdev);
10898 INIT_WORK(&bp->sp_task, bnxt_sp_task);
10899 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
10901 spin_lock_init(&bp->ntp_fltr_lock);
10902 #if BITS_PER_LONG == 32
10903 spin_lock_init(&bp->db_lock);
10906 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10907 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10909 bnxt_init_dflt_coal(bp);
10911 timer_setup(&bp->timer, bnxt_timer, 0);
10912 bp->current_interval = BNXT_TIMER_INTERVAL;
10914 clear_bit(BNXT_STATE_OPEN, &bp->state);
10918 bnxt_unmap_bars(bp, pdev);
10919 pci_release_regions(pdev);
10922 pci_disable_device(pdev);
10928 /* rtnl_lock held */
10929 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10931 struct sockaddr *addr = p;
10932 struct bnxt *bp = netdev_priv(dev);
10935 if (!is_valid_ether_addr(addr->sa_data))
10936 return -EADDRNOTAVAIL;
10938 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10941 rc = bnxt_approve_mac(bp, addr->sa_data, true);
10945 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10946 if (netif_running(dev)) {
10947 bnxt_close_nic(bp, false, false);
10948 rc = bnxt_open_nic(bp, false, false);
10954 /* rtnl_lock held */
10955 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10957 struct bnxt *bp = netdev_priv(dev);
10959 if (netif_running(dev))
10960 bnxt_close_nic(bp, false, false);
10962 dev->mtu = new_mtu;
10963 bnxt_set_ring_params(bp);
10965 if (netif_running(dev))
10966 return bnxt_open_nic(bp, false, false);
10971 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
10973 struct bnxt *bp = netdev_priv(dev);
10977 if (tc > bp->max_tc) {
10978 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
10983 if (netdev_get_num_tc(dev) == tc)
10986 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10989 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10990 sh, tc, bp->tx_nr_rings_xdp);
10994 /* Need to close the device and re-allocate hw resources */
10995 if (netif_running(bp->dev))
10996 bnxt_close_nic(bp, true, false);
10999 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11000 netdev_set_num_tc(dev, tc);
11002 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11003 netdev_reset_tc(dev);
11005 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11006 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11007 bp->tx_nr_rings + bp->rx_nr_rings;
11009 if (netif_running(bp->dev))
11010 return bnxt_open_nic(bp, true, false);
11015 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11018 struct bnxt *bp = cb_priv;
11020 if (!bnxt_tc_flower_enabled(bp) ||
11021 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11022 return -EOPNOTSUPP;
11025 case TC_SETUP_CLSFLOWER:
11026 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11028 return -EOPNOTSUPP;
11032 LIST_HEAD(bnxt_block_cb_list);
11034 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11037 struct bnxt *bp = netdev_priv(dev);
11040 case TC_SETUP_BLOCK:
11041 return flow_block_cb_setup_simple(type_data,
11042 &bnxt_block_cb_list,
11043 bnxt_setup_tc_block_cb,
11045 case TC_SETUP_QDISC_MQPRIO: {
11046 struct tc_mqprio_qopt *mqprio = type_data;
11048 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11050 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11053 return -EOPNOTSUPP;
11057 #ifdef CONFIG_RFS_ACCEL
11058 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11059 struct bnxt_ntuple_filter *f2)
11061 struct flow_keys *keys1 = &f1->fkeys;
11062 struct flow_keys *keys2 = &f2->fkeys;
11064 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
11065 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
11066 keys1->ports.ports == keys2->ports.ports &&
11067 keys1->basic.ip_proto == keys2->basic.ip_proto &&
11068 keys1->basic.n_proto == keys2->basic.n_proto &&
11069 keys1->control.flags == keys2->control.flags &&
11070 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11071 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11077 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11078 u16 rxq_index, u32 flow_id)
11080 struct bnxt *bp = netdev_priv(dev);
11081 struct bnxt_ntuple_filter *fltr, *new_fltr;
11082 struct flow_keys *fkeys;
11083 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11084 int rc = 0, idx, bit_id, l2_idx = 0;
11085 struct hlist_head *head;
11087 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11088 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11091 netif_addr_lock_bh(dev);
11092 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11093 if (ether_addr_equal(eth->h_dest,
11094 vnic->uc_list + off)) {
11099 netif_addr_unlock_bh(dev);
11103 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11107 fkeys = &new_fltr->fkeys;
11108 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11109 rc = -EPROTONOSUPPORT;
11113 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11114 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11115 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11116 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11117 rc = -EPROTONOSUPPORT;
11120 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11121 bp->hwrm_spec_code < 0x10601) {
11122 rc = -EPROTONOSUPPORT;
11125 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
11126 bp->hwrm_spec_code < 0x10601) {
11127 rc = -EPROTONOSUPPORT;
11131 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11132 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11134 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11135 head = &bp->ntp_fltr_hash_tbl[idx];
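/* Check whether a filter for this flow tuple already exists before
 * allocating a new one.
 */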
11137 hlist_for_each_entry_rcu(fltr, head, hash) {
11138 if (bnxt_fltr_match(fltr, new_fltr)) {
11146 spin_lock_bh(&bp->ntp_fltr_lock);
11147 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11148 BNXT_NTP_FLTR_MAX_FLTR, 0);
11150 spin_unlock_bh(&bp->ntp_fltr_lock);
11155 new_fltr->sw_id = (u16)bit_id;
11156 new_fltr->flow_id = flow_id;
11157 new_fltr->l2_fltr_idx = l2_idx;
11158 new_fltr->rxq = rxq_index;
11159 hlist_add_head_rcu(&new_fltr->hash, head);
11160 bp->ntp_fltr_count++;
11161 spin_unlock_bh(&bp->ntp_fltr_lock);
11163 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11164 bnxt_queue_sp_work(bp);
11166 return new_fltr->sw_id;
11173 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11177 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11178 struct hlist_head *head;
11179 struct hlist_node *tmp;
11180 struct bnxt_ntuple_filter *fltr;
11183 head = &bp->ntp_fltr_hash_tbl[i];
11184 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11187 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11188 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11191 bnxt_hwrm_cfa_ntuple_filter_free(bp,
11196 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11201 set_bit(BNXT_FLTR_VALID, &fltr->state);
11205 spin_lock_bh(&bp->ntp_fltr_lock);
11206 hlist_del_rcu(&fltr->hash);
11207 bp->ntp_fltr_count--;
11208 spin_unlock_bh(&bp->ntp_fltr_lock);
11210 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11215 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11216 netdev_info(bp->dev, "Received PF driver unload event!\n");
11221 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11225 #endif /* CONFIG_RFS_ACCEL */
11227 static void bnxt_udp_tunnel_add(struct net_device *dev,
11228 struct udp_tunnel_info *ti)
11230 struct bnxt *bp = netdev_priv(dev);
11232 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11235 if (!netif_running(dev))
11238 switch (ti->type) {
11239 case UDP_TUNNEL_TYPE_VXLAN:
11240 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
11243 bp->vxlan_port_cnt++;
11244 if (bp->vxlan_port_cnt == 1) {
11245 bp->vxlan_port = ti->port;
11246 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
11247 bnxt_queue_sp_work(bp);
11250 case UDP_TUNNEL_TYPE_GENEVE:
11251 if (bp->nge_port_cnt && bp->nge_port != ti->port)
11254 bp->nge_port_cnt++;
11255 if (bp->nge_port_cnt == 1) {
11256 bp->nge_port = ti->port;
11257 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
11264 bnxt_queue_sp_work(bp);

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;
		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;
		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}
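
/* Bridge mode (VEB vs. VEPA) is exposed through the standard
 * ndo_bridge_getlink/setlink netlink interface.  Setting the mode is
 * only honored on a single-function PF with new enough firmware, and
 * the change is programmed via bnxt_hwrm_set_br_mode().
 */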
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->switch_id);
	memcpy(ppid->id, bp->switch_id, ppid->id_len);

	return 0;
}

static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return &bp->dl_port;
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
	.ndo_bpf		= bnxt_xdp,
	.ndo_xdp_xmit		= bnxt_xdp_xmit,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
	.ndo_get_devlink_port	= bnxt_get_devlink_port,
};
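
/* Device teardown: note the ordering below - SR-IOV is disabled and the
 * netdev unregistered before the slow-path work is cancelled and the
 * HWRM/firmware resources are released, so no new requests can arrive
 * while the firmware communication channel is being torn down.
 */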
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	bnxt_dl_unregister(bp);
	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	bnxt_shutdown_tc(bp);
	bnxt_cancel_sp_work(bp);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	if (!fw_dflt)
		return 0;

	bnxt_init_ethtool_link_settings(bp);
	return 0;
}
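
/* The maximum usable IRQ count is derived from the MSI-X capability:
 * the Table Size field of the MSI-X Message Control register encodes
 * (number of vectors - 1), hence the "+ 1" below.
 */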
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}
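
/* Compute the upper bounds for RX, TX and completion rings from the
 * resources the firmware has granted this function, after subtracting
 * the MSI-X vectors and stat contexts reserved for RDMA (ULP) use.
 */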
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num(bp),
			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}
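
/* Pick the default ring counts: start from the RSS default (a single
 * ring in a kdump kernel), cap them so multi-port adapters do not claim
 * more rings than there are online CPUs, then reserve the rings with
 * the firmware, trimming again if the reservation comes back smaller.
 */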
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}
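
/* For a PF the permanent MAC address comes from firmware; for a VF the
 * PF-administered MAC is used if one was assigned, otherwise a random
 * address is generated and submitted to the PF for approval.
 */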
static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}
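
/* The 64-bit Device Serial Number lives in a PCIe extended capability;
 * its two dwords start 4 bytes past the capability header and are read
 * out little-endian to seed the eswitch switch_id.
 */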
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	u32 dw;

	if (!pos) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	/* DSN (two dw) is at an offset of 4 from the cap pos */
	pos += 4;
	pci_read_config_dword(pdev, pos, &dw);
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[4]);
	return 0;
}
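
/* Main PCI probe entry point: allocate the netdev, bring up the HWRM
 * channel, negotiate features and resources with firmware, size the
 * default rings and finally register the netdev.  Each failure label
 * below unwinds exactly what was set up before it.
 */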
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
		if (rc)
			goto init_err_pci_clean;
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_disable_device(pdev);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}
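
/* System suspend/resume: firmware loses all driver-registered state
 * across suspend, so resume must redo the HWRM handshake (version
 * query, function reset, queue config, context memory and driver
 * registration) before the netdev can be reopened.
 */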
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	if (bnxt_hwrm_queue_qportcfg(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	if (bp->hwrm_spec_code >= 0x10803) {
		if (bnxt_alloc_ctx_mem(bp)) {
			rc = -ENODEV;
			goto resume_exit;
		}
	}
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
		bnxt_ulp_start(bp, err);
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);