/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
11 #include <linux/ctype.h>
12 #include <linux/stringify.h>
13 #include <linux/ethtool.h>
14 #include <linux/linkmode.h>
15 #include <linux/interrupt.h>
16 #include <linux/pci.h>
17 #include <linux/etherdevice.h>
18 #include <linux/crc32.h>
19 #include <linux/firmware.h>
20 #include <linux/utsname.h>
21 #include <linux/time.h>
22 #include <linux/ptp_clock_kernel.h>
23 #include <linux/net_tstamp.h>
24 #include <linux/timecounter.h>
27 #include "bnxt_hwrm.h"
30 #include "bnxt_ethtool.h"
31 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
32 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
33 #include "bnxt_coredump.h"
35 static u32 bnxt_get_msglevel(struct net_device *dev)
37 struct bnxt *bp = netdev_priv(dev);
39 return bp->msg_enable;
42 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
44 struct bnxt *bp = netdev_priv(dev);
46 bp->msg_enable = value;
/* ethtool -c handler: report the current interrupt-coalescing settings.
 * NOTE(review): this block is a garbled paste -- original line numbers are
 * fused into each line and several lines (braces, the `mult` declaration,
 * the `if (hw_coal->flags & ...)` heads, `return 0;`) appear to be missing.
 * Kept byte-identical; only comments added.
 */
49 static int bnxt_get_coalesce(struct net_device *dev,
50 struct ethtool_coalesce *coal,
51 struct kernel_ethtool_coalesce *kernel_coal,
52 struct netlink_ext_ack *extack)
54 struct bnxt *bp = netdev_priv(dev);
55 struct bnxt_coal *hw_coal;
58 memset(coal, 0, sizeof(*coal));
/* DIM (dynamic interrupt moderation) is surfaced as adaptive-rx */
60 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
/* RX side: hw values are per-record, so frame counts divide by mult */
62 hw_coal = &bp->rx_coal;
63 mult = hw_coal->bufs_per_record;
64 coal->rx_coalesce_usecs = hw_coal->coal_ticks;
65 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
66 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
67 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
/* NOTE(review): condition head for this TIMER_RESET test is missing */
69 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
70 kernel_coal->use_cqe_mode_rx = true;
/* TX side mirrors the RX path */
72 hw_coal = &bp->tx_coal;
73 mult = hw_coal->bufs_per_record;
74 coal->tx_coalesce_usecs = hw_coal->coal_ticks;
75 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
76 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
77 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
/* NOTE(review): condition head for this TIMER_RESET test is missing */
79 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
80 kernel_coal->use_cqe_mode_tx = true;
82 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
/* ethtool -C handler: apply new interrupt-coalescing settings.
 * NOTE(review): garbled paste -- original line numbers are fused into the
 * text; declarations (`rc`, `mult`), `else` lines, closing braces and the
 * hw_coal->flags assignment heads are missing. Kept byte-identical.
 */
87 static int bnxt_set_coalesce(struct net_device *dev,
88 struct ethtool_coalesce *coal,
89 struct kernel_ethtool_coalesce *kernel_coal,
90 struct netlink_ext_ack *extack)
92 struct bnxt *bp = netdev_priv(dev);
93 bool update_stats = false;
94 struct bnxt_coal *hw_coal;
/* Toggle DIM (adaptive rx) on request */
98 if (coal->use_adaptive_rx_coalesce) {
99 bp->flags |= BNXT_FLAG_DIM;
101 if (bp->flags & BNXT_FLAG_DIM) {
102 bp->flags &= ~(BNXT_FLAG_DIM);
/* CQE mode needs the TIMER_RESET completion capability */
107 if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
108 !(bp->coal_cap.cmpl_params &
109 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
/* RX coalescing parameters; hw stores bufs per record (`mult`) */
112 hw_coal = &bp->rx_coal;
113 mult = hw_coal->bufs_per_record;
114 hw_coal->coal_ticks = coal->rx_coalesce_usecs;
115 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
116 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
117 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
119 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
120 if (kernel_coal->use_cqe_mode_rx)
122 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* TX coalescing parameters */
124 hw_coal = &bp->tx_coal;
125 mult = hw_coal->bufs_per_record;
126 hw_coal->coal_ticks = coal->tx_coalesce_usecs;
127 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
128 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
129 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
131 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
132 if (kernel_coal->use_cqe_mode_tx)
134 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* Statistics block interval: clamp then round to the minimum granularity */
136 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
137 u32 stats_ticks = coal->stats_block_coalesce_usecs;
139 /* Allow 0, which means disable. */
141 stats_ticks = clamp_t(u32, stats_ticks,
142 BNXT_MIN_STATS_COAL_TICKS,
143 BNXT_MAX_STATS_COAL_TICKS);
144 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
145 bp->stats_coal_ticks = stats_ticks;
146 if (bp->stats_coal_ticks)
147 bp->current_interval =
148 bp->stats_coal_ticks * HZ / 1000000;
150 bp->current_interval = BNXT_TIMER_INTERVAL;
/* A stats-interval change requires a nic close/open cycle; otherwise a
 * plain HWRM coalescing update suffices while the device is running.
 */
155 if (netif_running(dev)) {
157 rc = bnxt_close_nic(bp, true, false);
159 rc = bnxt_open_nic(bp, true, false);
161 rc = bnxt_hwrm_set_coal(bp);
/* Statistics name tables and offset/name entry macros backing the
 * ethtool -S implementation (bnxt_get_strings / bnxt_get_ethtool_stats).
 * NOTE(review): garbled extraction -- original line numbers are fused into
 * each line and most array entries, struct heads and closing braces are
 * missing from the paste. Kept byte-identical; only comments added.
 */
168 static const char * const bnxt_ring_rx_stats_str[] = {
179 static const char * const bnxt_ring_tx_stats_str[] = {
190 static const char * const bnxt_ring_tpa_stats_str[] = {
197 static const char * const bnxt_ring_tpa2_stats_str[] = {
198 "rx_tpa_eligible_pkt",
199 "rx_tpa_eligible_bytes",
206 static const char * const bnxt_rx_sw_stats_str[] = {
212 static const char * const bnxt_cmn_sw_stats_str[] = {
/* Builders producing { offset, "name" } pairs for the tables below */
216 #define BNXT_RX_STATS_ENTRY(counter) \
217 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
219 #define BNXT_TX_STATS_ENTRY(counter) \
220 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
222 #define BNXT_RX_STATS_EXT_ENTRY(counter) \
223 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
225 #define BNXT_TX_STATS_EXT_ENTRY(counter) \
226 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
/* Per-priority PFC duration/transition counters, expanded for pri 0..7 */
228 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
229 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
230 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
232 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
233 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
234 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
236 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \
237 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
238 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
239 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
240 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
241 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
242 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
243 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
244 BNXT_RX_STATS_EXT_PFC_ENTRY(7)
246 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \
247 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
248 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
249 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
250 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
251 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
252 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
253 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
254 BNXT_TX_STATS_EXT_PFC_ENTRY(7)
/* Per-CoS byte/packet counters, expanded for cos 0..7 */
256 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
257 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
258 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
260 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
261 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
262 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
264 #define BNXT_RX_STATS_EXT_COS_ENTRIES \
265 BNXT_RX_STATS_EXT_COS_ENTRY(0), \
266 BNXT_RX_STATS_EXT_COS_ENTRY(1), \
267 BNXT_RX_STATS_EXT_COS_ENTRY(2), \
268 BNXT_RX_STATS_EXT_COS_ENTRY(3), \
269 BNXT_RX_STATS_EXT_COS_ENTRY(4), \
270 BNXT_RX_STATS_EXT_COS_ENTRY(5), \
271 BNXT_RX_STATS_EXT_COS_ENTRY(6), \
272 BNXT_RX_STATS_EXT_COS_ENTRY(7) \
274 #define BNXT_TX_STATS_EXT_COS_ENTRIES \
275 BNXT_TX_STATS_EXT_COS_ENTRY(0), \
276 BNXT_TX_STATS_EXT_COS_ENTRY(1), \
277 BNXT_TX_STATS_EXT_COS_ENTRY(2), \
278 BNXT_TX_STATS_EXT_COS_ENTRY(3), \
279 BNXT_TX_STATS_EXT_COS_ENTRY(4), \
280 BNXT_TX_STATS_EXT_COS_ENTRY(5), \
281 BNXT_TX_STATS_EXT_COS_ENTRY(6), \
282 BNXT_TX_STATS_EXT_COS_ENTRY(7) \
284 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
285 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
286 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
288 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
289 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
290 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
291 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
292 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
293 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
294 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
295 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
296 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
/* Per-priority entries reuse the _cos0 offset as a base; the real offset
 * is computed at runtime using the pri2cos map (see bnxt_get_ethtool_stats).
 */
298 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
299 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
300 __stringify(counter##_pri##n) }
302 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
303 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
304 __stringify(counter##_pri##n) }
306 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \
307 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
308 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
309 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
310 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
311 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
312 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
313 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
314 BNXT_RX_STATS_PRI_ENTRY(counter, 7)
316 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \
317 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
318 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
319 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
320 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
321 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
322 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
323 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
324 BNXT_TX_STATS_PRI_ENTRY(counter, 7)
/* Software-accumulated per-function counters; `counter` fields are summed
 * over all completion rings in bnxt_get_ethtool_stats.
 */
334 char string[ETH_GSTRING_LEN];
335 } bnxt_sw_func_stats[] = {
336 {0, "rx_total_discard_pkts"},
337 {0, "tx_total_discard_pkts"},
338 {0, "rx_total_netpoll_discards"},
341 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
342 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
343 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
344 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
/* Base MAC/port statistics names and their offsets into the hw stats block */
346 static const struct {
348 char string[ETH_GSTRING_LEN];
349 } bnxt_port_stats_arr[] = {
350 BNXT_RX_STATS_ENTRY(rx_64b_frames),
351 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
352 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
353 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
354 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
355 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
356 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
357 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
358 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
359 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
360 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
361 BNXT_RX_STATS_ENTRY(rx_total_frames),
362 BNXT_RX_STATS_ENTRY(rx_ucast_frames),
363 BNXT_RX_STATS_ENTRY(rx_mcast_frames),
364 BNXT_RX_STATS_ENTRY(rx_bcast_frames),
365 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
366 BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
367 BNXT_RX_STATS_ENTRY(rx_pause_frames),
368 BNXT_RX_STATS_ENTRY(rx_pfc_frames),
369 BNXT_RX_STATS_ENTRY(rx_align_err_frames),
370 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
371 BNXT_RX_STATS_ENTRY(rx_jbr_frames),
372 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
373 BNXT_RX_STATS_ENTRY(rx_tagged_frames),
374 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
375 BNXT_RX_STATS_ENTRY(rx_good_frames),
376 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
377 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
378 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
379 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
380 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
381 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
382 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
383 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
384 BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
385 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
386 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
387 BNXT_RX_STATS_ENTRY(rx_bytes),
388 BNXT_RX_STATS_ENTRY(rx_runt_bytes),
389 BNXT_RX_STATS_ENTRY(rx_runt_frames),
390 BNXT_RX_STATS_ENTRY(rx_stat_discard),
391 BNXT_RX_STATS_ENTRY(rx_stat_err),
393 BNXT_TX_STATS_ENTRY(tx_64b_frames),
394 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
395 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
396 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
397 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
398 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
399 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
400 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
401 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
402 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
403 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
404 BNXT_TX_STATS_ENTRY(tx_good_frames),
405 BNXT_TX_STATS_ENTRY(tx_total_frames),
406 BNXT_TX_STATS_ENTRY(tx_ucast_frames),
407 BNXT_TX_STATS_ENTRY(tx_mcast_frames),
408 BNXT_TX_STATS_ENTRY(tx_bcast_frames),
409 BNXT_TX_STATS_ENTRY(tx_pause_frames),
410 BNXT_TX_STATS_ENTRY(tx_pfc_frames),
411 BNXT_TX_STATS_ENTRY(tx_jabber_frames),
412 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
413 BNXT_TX_STATS_ENTRY(tx_err),
414 BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
415 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
416 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
417 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
418 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
419 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
420 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
421 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
422 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
423 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
424 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
425 BNXT_TX_STATS_ENTRY(tx_total_collisions),
426 BNXT_TX_STATS_ENTRY(tx_bytes),
427 BNXT_TX_STATS_ENTRY(tx_xthol_frames),
428 BNXT_TX_STATS_ENTRY(tx_stat_discard),
429 BNXT_TX_STATS_ENTRY(tx_stat_error),
/* Extended RX port statistics (firmware-dependent length) */
432 static const struct {
434 char string[ETH_GSTRING_LEN];
435 } bnxt_port_stats_ext_arr[] = {
436 BNXT_RX_STATS_EXT_ENTRY(link_down_events),
437 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
438 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
439 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
440 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
441 BNXT_RX_STATS_EXT_COS_ENTRIES,
442 BNXT_RX_STATS_EXT_PFC_ENTRIES,
443 BNXT_RX_STATS_EXT_ENTRY(rx_bits),
444 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
445 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
446 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
447 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
448 BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
449 BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
/* Extended TX port statistics */
452 static const struct {
454 char string[ETH_GSTRING_LEN];
455 } bnxt_tx_port_stats_ext_arr[] = {
456 BNXT_TX_STATS_EXT_COS_ENTRIES,
457 BNXT_TX_STATS_EXT_PFC_ENTRIES,
/* Per-priority tables (resolved via pri2cos at dump time) */
460 static const struct {
462 char string[ETH_GSTRING_LEN];
463 } bnxt_rx_bytes_pri_arr[] = {
464 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
467 static const struct {
469 char string[ETH_GSTRING_LEN];
470 } bnxt_rx_pkts_pri_arr[] = {
471 BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
474 static const struct {
476 char string[ETH_GSTRING_LEN];
477 } bnxt_tx_bytes_pri_arr[] = {
478 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
481 static const struct {
483 char string[ETH_GSTRING_LEN];
484 } bnxt_tx_pkts_pri_arr[] = {
485 BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
488 #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
489 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
490 #define BNXT_NUM_STATS_PRI \
491 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
492 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
493 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
494 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
/* Number of per-ring TPA (LRO/GRO-HW) counters exposed, which varies by
 * chip generation. NOTE(review): garbled paste -- braces and the trailing
 * `return 0;` for the no-TPA case are missing; kept byte-identical.
 */
496 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
498 if (BNXT_SUPPORTS_TPA(bp)) {
/* max_tpa_v2 firmware uses the newer, larger counter sets */
499 if (bp->max_tpa_v2) {
500 if (BNXT_CHIP_P5_THOR(bp))
501 return BNXT_NUM_TPA_RING_STATS_P5;
502 return BNXT_NUM_TPA_RING_STATS_P5_SR2;
504 return BNXT_NUM_TPA_RING_STATS;
/* Total count of per-ring statistics: rx stats per rx ring, tx stats per
 * tx ring, plus common sw stats per completion ring.
 * NOTE(review): the `int rx, tx, cmn;` declaration line is missing from
 * this garbled paste; kept byte-identical.
 */
509 static int bnxt_get_num_ring_stats(struct bnxt *bp)
513 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
514 bnxt_get_num_tpa_ring_stats(bp);
515 tx = NUM_RING_TX_HW_STATS;
516 cmn = NUM_RING_CMN_SW_STATS;
517 return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
518 cmn * bp->cp_nr_rings;
/* Grand total of ethtool -S entries: ring stats + sw function stats +
 * optional port / extended-port / per-priority stats.
 * NOTE(review): closing `return num_stats;` line missing from the paste.
 */
521 static int bnxt_get_num_stats(struct bnxt *bp)
523 int num_stats = bnxt_get_num_ring_stats(bp);
525 num_stats += BNXT_NUM_SW_FUNC_STATS;
527 if (bp->flags & BNXT_FLAG_PORT_STATS)
528 num_stats += BNXT_NUM_PORT_STATS;
530 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
/* Extended stats length is reported by firmware, not the table size */
531 num_stats += bp->fw_rx_stats_ext_size +
532 bp->fw_tx_stats_ext_size;
533 if (bp->pri2cos_valid)
534 num_stats += BNXT_NUM_STATS_PRI;
/* ethtool_ops::get_sset_count: report string-set sizes (stats and tests).
 * NOTE(review): the switch(sset)/case lines are missing from this paste;
 * presumably ETH_SS_STATS and ETH_SS_TEST branches -- TODO confirm.
 */
540 static int bnxt_get_sset_count(struct net_device *dev, int sset)
542 struct bnxt *bp = netdev_priv(dev);
546 return bnxt_get_num_stats(bp);
550 return bp->num_tests;
556 static bool is_rx_ring(struct bnxt *bp, int ring_num)
558 return ring_num < bp->rx_nr_rings;
/* Whether completion ring `ring_num` hosts a TX ring. With shared rings
 * TX starts at index 0; otherwise TX rings follow the RX rings.
 * NOTE(review): the `tx_base` initialization and trailing `return false;`
 * are missing from this garbled paste; kept byte-identical.
 */
561 static bool is_tx_ring(struct bnxt *bp, int ring_num)
565 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
566 tx_base = bp->rx_nr_rings;
568 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
/* ethtool -S handler: fill `buf` in exactly the order that
 * bnxt_get_strings emits names. NOTE(review): garbled paste -- loop/index
 * declarations, several `if`/`else` lines, labels and closing braces are
 * missing. Kept byte-identical; only comments added.
 */
573 static void bnxt_get_ethtool_stats(struct net_device *dev,
574 struct ethtool_stats *stats, u64 *buf)
577 struct bnxt *bp = netdev_priv(dev);
/* If ring state is unavailable, skip over the ring-stat slots */
581 j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
582 goto skip_ring_stats;
/* Re-accumulate the sw function totals from scratch each dump */
585 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
586 bnxt_sw_func_stats[i].counter = 0;
588 tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
589 for (i = 0; i < bp->cp_nr_rings; i++) {
590 struct bnxt_napi *bnapi = bp->bnapi[i];
591 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
592 u64 *sw_stats = cpr->stats.sw_stats;
/* HW ring stats: rx block, then tx block, then optional TPA block */
596 if (is_rx_ring(bp, i)) {
597 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
598 buf[j] = sw_stats[k];
600 if (is_tx_ring(bp, i)) {
601 k = NUM_RING_RX_HW_STATS;
602 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
604 buf[j] = sw_stats[k];
606 if (!tpa_stats || !is_rx_ring(bp, i))
607 goto skip_tpa_ring_stats;
609 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
610 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
612 buf[j] = sw_stats[k];
/* Per-ring software stats (rx-only set, then common set) */
615 sw = (u64 *)&cpr->sw_stats.rx;
616 if (is_rx_ring(bp, i)) {
617 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
621 sw = (u64 *)&cpr->sw_stats.cmn;
622 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
/* Fold this ring's discards into the function-wide totals */
625 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
626 BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
627 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
628 BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
629 bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
630 cpr->sw_stats.rx.rx_netpoll_discards;
633 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
634 buf[j] = bnxt_sw_func_stats[i].counter;
/* Port-level hw stats, read through the offset tables above */
637 if (bp->flags & BNXT_FLAG_PORT_STATS) {
638 u64 *port_stats = bp->port_stats.sw_stats;
640 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
641 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
643 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
644 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
645 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
/* Only as many extended entries as the firmware actually provides */
647 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
648 buf[j] = *(rx_port_stats_ext +
649 bnxt_port_stats_ext_arr[i].offset);
651 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
652 buf[j] = *(tx_port_stats_ext +
653 bnxt_tx_port_stats_ext_arr[i].offset);
/* Per-priority stats: base _cos0 offset plus a pri2cos-derived index.
 * NOTE(review): the lines adding the pri2cos index to `n` are missing.
 */
655 if (bp->pri2cos_valid) {
656 for (i = 0; i < 8; i++, j++) {
657 long n = bnxt_rx_bytes_pri_arr[i].base_off +
660 buf[j] = *(rx_port_stats_ext + n);
662 for (i = 0; i < 8; i++, j++) {
663 long n = bnxt_rx_pkts_pri_arr[i].base_off +
666 buf[j] = *(rx_port_stats_ext + n);
668 for (i = 0; i < 8; i++, j++) {
669 long n = bnxt_tx_bytes_pri_arr[i].base_off +
672 buf[j] = *(tx_port_stats_ext + n);
674 for (i = 0; i < 8; i++, j++) {
675 long n = bnxt_tx_pkts_pri_arr[i].base_off +
678 buf[j] = *(tx_port_stats_ext + n);
/* ethtool_ops::get_strings: emit stat/test names, ETH_GSTRING_LEN bytes
 * apart, in the same order bnxt_get_ethtool_stats fills values.
 * NOTE(review): garbled paste -- switch(stringset)/case lines, several
 * strcpy() heads and closing braces are missing. Kept byte-identical.
 */
684 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
686 struct bnxt *bp = netdev_priv(dev);
687 static const char * const *str;
/* Per-ring names, prefixed with the ring index "[i]: " */
692 for (i = 0; i < bp->cp_nr_rings; i++) {
693 if (is_rx_ring(bp, i)) {
694 num_str = NUM_RING_RX_HW_STATS;
695 for (j = 0; j < num_str; j++) {
696 sprintf(buf, "[%d]: %s", i,
697 bnxt_ring_rx_stats_str[j]);
698 buf += ETH_GSTRING_LEN;
701 if (is_tx_ring(bp, i)) {
702 num_str = NUM_RING_TX_HW_STATS;
703 for (j = 0; j < num_str; j++) {
704 sprintf(buf, "[%d]: %s", i,
705 bnxt_ring_tx_stats_str[j]);
706 buf += ETH_GSTRING_LEN;
/* TPA names: tpa2 table for newer chips, legacy table otherwise */
709 num_str = bnxt_get_num_tpa_ring_stats(bp);
710 if (!num_str || !is_rx_ring(bp, i))
714 str = bnxt_ring_tpa2_stats_str;
716 str = bnxt_ring_tpa_stats_str;
718 for (j = 0; j < num_str; j++) {
719 sprintf(buf, "[%d]: %s", i, str[j]);
720 buf += ETH_GSTRING_LEN;
723 if (is_rx_ring(bp, i)) {
724 num_str = NUM_RING_RX_SW_STATS;
725 for (j = 0; j < num_str; j++) {
726 sprintf(buf, "[%d]: %s", i,
727 bnxt_rx_sw_stats_str[j]);
728 buf += ETH_GSTRING_LEN;
731 num_str = NUM_RING_CMN_SW_STATS;
732 for (j = 0; j < num_str; j++) {
733 sprintf(buf, "[%d]: %s", i,
734 bnxt_cmn_sw_stats_str[j]);
735 buf += ETH_GSTRING_LEN;
/* Function-wide software counters */
738 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
739 strcpy(buf, bnxt_sw_func_stats[i].string);
740 buf += ETH_GSTRING_LEN;
/* Port-level names, gated on the same flags as the values */
743 if (bp->flags & BNXT_FLAG_PORT_STATS) {
744 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
745 strcpy(buf, bnxt_port_stats_arr[i].string);
746 buf += ETH_GSTRING_LEN;
749 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
750 for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
751 strcpy(buf, bnxt_port_stats_ext_arr[i].string);
752 buf += ETH_GSTRING_LEN;
754 for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
756 bnxt_tx_port_stats_ext_arr[i].string);
757 buf += ETH_GSTRING_LEN;
759 if (bp->pri2cos_valid) {
760 for (i = 0; i < 8; i++) {
762 bnxt_rx_bytes_pri_arr[i].string);
763 buf += ETH_GSTRING_LEN;
765 for (i = 0; i < 8; i++) {
767 bnxt_rx_pkts_pri_arr[i].string);
768 buf += ETH_GSTRING_LEN;
770 for (i = 0; i < 8; i++) {
772 bnxt_tx_bytes_pri_arr[i].string);
773 buf += ETH_GSTRING_LEN;
775 for (i = 0; i < 8; i++) {
777 bnxt_tx_pkts_pri_arr[i].string);
778 buf += ETH_GSTRING_LEN;
/* Self-test names come straight from firmware-provided test_info */
785 memcpy(buf, bp->test_info->string,
786 bp->num_tests * ETH_GSTRING_LEN);
789 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
/* ethtool -g handler: report ring-size limits and current sizes.
 * NOTE(review): the `} else {` line between the AGG and non-AGG branches
 * is missing from this garbled paste; kept byte-identical.
 */
795 static void bnxt_get_ringparam(struct net_device *dev,
796 struct ethtool_ringparam *ering,
797 struct kernel_ethtool_ringparam *kernel_ering,
798 struct netlink_ext_ack *extack)
800 struct bnxt *bp = netdev_priv(dev);
/* With aggregation rings, jumbo descriptors are exposed separately */
802 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
803 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
804 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
806 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
807 ering->rx_jumbo_max_pending = 0;
809 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
811 ering->rx_pending = bp->rx_ring_size;
812 ering->rx_jumbo_pending = bp->rx_agg_ring_size;
813 ering->tx_pending = bp->tx_ring_size;
/* ethtool -G handler: validate requested sizes, then close/resize/reopen.
 * NOTE(review): the `return -EINVAL;` after the bounds check and the final
 * `return 0;` are missing from this garbled paste; kept byte-identical.
 */
816 static int bnxt_set_ringparam(struct net_device *dev,
817 struct ethtool_ringparam *ering,
818 struct kernel_ethtool_ringparam *kernel_ering,
819 struct netlink_ext_ack *extack)
821 struct bnxt *bp = netdev_priv(dev);
823 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
824 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
825 (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
/* Resizing requires the rings to be torn down first */
828 if (netif_running(dev))
829 bnxt_close_nic(bp, false, false);
831 bp->rx_ring_size = ering->rx_pending;
832 bp->tx_ring_size = ering->tx_pending;
833 bnxt_set_ring_params(bp);
835 if (netif_running(dev))
836 return bnxt_open_nic(bp, false, false);
/* ethtool -l handler: report max and current channel (ring) counts.
 * NOTE(review): garbled paste -- several lines (tx_grps adjustment,
 * the error/else branches, closing braces) are missing.
 */
841 static void bnxt_get_channels(struct net_device *dev,
842 struct ethtool_channels *channel)
844 struct bnxt *bp = netdev_priv(dev);
845 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
846 int max_rx_rings, max_tx_rings, tcs;
847 int max_tx_sch_inputs, tx_grps;
849 /* Get the most up-to-date max_tx_sch_inputs. */
850 if (netif_running(dev) && BNXT_NEW_RM(bp))
851 bnxt_hwrm_func_resc_qcaps(bp, false);
852 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
/* First pass: shared-ring maximums for the "combined" figure */
854 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
855 if (max_tx_sch_inputs)
856 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
/* TX rings are divided among traffic classes (and XDP, if enabled) */
858 tcs = netdev_get_num_tc(dev);
859 tx_grps = max(tcs, 1);
860 if (bp->tx_nr_rings_xdp)
862 max_tx_rings /= tx_grps;
863 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
/* Second pass: non-shared maximums for separate rx/tx counts */
865 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
869 if (max_tx_sch_inputs)
870 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
875 channel->max_rx = max_rx_rings;
876 channel->max_tx = max_tx_rings;
877 channel->max_other = 0;
878 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
879 channel->combined_count = bp->rx_nr_rings;
880 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
881 channel->combined_count--;
883 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
884 channel->rx_count = bp->rx_nr_rings;
885 channel->tx_count = bp->tx_nr_rings_per_tc;
/* ethtool -L handler: validate the requested channel layout, then
 * reconfigure ring counts (closing/reopening the nic if it is running).
 * NOTE(review): garbled paste -- declarations (`sh`, `tx_xdp`, `rc`),
 * several `return -EINVAL;` lines, `else` branches and the tail of the
 * function are missing. Kept byte-identical; only comments added.
 */
890 static int bnxt_set_channels(struct net_device *dev,
891 struct ethtool_channels *channel)
893 struct bnxt *bp = netdev_priv(dev);
894 int req_tx_rings, req_rx_rings, tcs;
/* Reject layouts this driver cannot express */
899 if (channel->other_count)
902 if (!channel->combined_count &&
903 (!channel->rx_count || !channel->tx_count))
906 if (channel->combined_count &&
907 (channel->rx_count || channel->tx_count))
910 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
914 if (channel->combined_count)
917 tcs = netdev_get_num_tc(dev);
919 req_tx_rings = sh ? channel->combined_count : channel->tx_count;
920 req_rx_rings = sh ? channel->combined_count : channel->rx_count;
/* XDP reserves one tx ring per rx ring and requires combined mode */
921 if (bp->tx_nr_rings_xdp) {
923 netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
926 tx_xdp = req_rx_rings;
928 rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
930 netdev_warn(dev, "Unable to allocate the requested rings\n");
/* An RSS table resize would clobber a user-configured indirection table */
934 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
935 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
936 netif_is_rxfh_configured(dev)) {
937 netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
941 if (netif_running(dev)) {
943 /* TODO CHIMP_FW: Send message to all VF's
947 rc = bnxt_close_nic(bp, true, false);
949 netdev_err(bp->dev, "Set channel failure rc :%x\n",
/* Commit the new layout to bp before reopening */
956 bp->flags |= BNXT_FLAG_SHARED_RINGS;
957 bp->rx_nr_rings = channel->combined_count;
958 bp->tx_nr_rings_per_tc = channel->combined_count;
960 bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
961 bp->rx_nr_rings = channel->rx_count;
962 bp->tx_nr_rings_per_tc = channel->tx_count;
964 bp->tx_nr_rings_xdp = tx_xdp;
965 bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
967 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
969 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
970 bp->tx_nr_rings + bp->rx_nr_rings;
972 /* After changing number of rx channels, update NTUPLE feature. */
973 netdev_update_features(dev);
974 if (netif_running(dev)) {
975 rc = bnxt_open_nic(bp, true, false);
976 if ((!rc) && BNXT_PF(bp)) {
977 /* TODO CHIMP_FW: Send message to all VF's
982 rc = bnxt_reserve_rings(bp, true);
988 #ifdef CONFIG_RFS_ACCEL
/* ETHTOOL_GRXCLSRLALL: copy the sw_ids of all installed ntuple filters
 * into rule_locs, up to cmd->rule_cnt entries.
 * NOTE(review): garbled paste -- the signature's rule_locs parameter
 * line, rcu locking and the `return 0;` appear to be missing.
 */
989 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
994 cmd->data = bp->ntp_fltr_count;
/* Walk every hash bucket of the ntuple filter table */
995 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
996 struct hlist_head *head;
997 struct bnxt_ntuple_filter *fltr;
999 head = &bp->ntp_fltr_hash_tbl[i];
1001 hlist_for_each_entry_rcu(fltr, head, hash) {
1002 if (j == cmd->rule_cnt)
1004 rule_locs[j++] = fltr->sw_id;
1007 if (j == cmd->rule_cnt)
/* ETHTOOL_GRXCLSRULE: look up the ntuple filter with sw_id ==
 * fs->location and translate its flow keys into an ethtool flow spec
 * (exact-match masks on addresses and ports).
 * NOTE(review): garbled paste -- rcu lock/unlock, `goto fltr_found`,
 * the IPv6 branch head and `return rc;` lines are missing.
 */
1014 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1016 struct ethtool_rx_flow_spec *fs =
1017 (struct ethtool_rx_flow_spec *)&cmd->fs;
1018 struct bnxt_ntuple_filter *fltr;
1019 struct flow_keys *fkeys;
1020 int i, rc = -EINVAL;
1022 if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
/* Search every hash bucket for the requested sw_id */
1025 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
1026 struct hlist_head *head;
1028 head = &bp->ntp_fltr_hash_tbl[i];
1030 hlist_for_each_entry_rcu(fltr, head, hash) {
1031 if (fltr->sw_id == fs->location)
/* IPv4 flows: report exact-match (all-ones) masks */
1039 fkeys = &fltr->fkeys;
1040 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
1041 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1042 fs->flow_type = TCP_V4_FLOW;
1043 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1044 fs->flow_type = UDP_V4_FLOW;
1048 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
1049 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
1051 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
1052 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
1054 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
1055 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
1057 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
1058 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
/* IPv6 flows (branch head missing from the paste) */
1062 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1063 fs->flow_type = TCP_V6_FLOW;
1064 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1065 fs->flow_type = UDP_V6_FLOW;
1069 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
1070 fkeys->addrs.v6addrs.src;
1071 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
1072 fkeys->addrs.v6addrs.dst;
1073 for (i = 0; i < 4; i++) {
1074 fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
1075 fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
1077 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
1078 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
1080 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
1081 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
/* Destination queue of the filter */
1084 fs->ring_cookie = fltr->rxq;
/* Map the device's 2-tuple IPv4/IPv6 RSS hash config to ethtool RXH bits.
 * NOTE(review): braces and the trailing `return 0;` of each helper are
 * missing from this garbled paste; kept byte-identical.
 */
1094 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1096 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1097 return RXH_IP_SRC | RXH_IP_DST;
1101 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1103 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1104 return RXH_IP_SRC | RXH_IP_DST;
/* ETHTOOL_GRXFH: report which header fields feed the RSS hash for the
 * given flow type. NOTE(review): garbled paste -- the `cmd->data = 0;`
 * initialization, several `case`/`break` lines and `return 0;` are
 * missing. Kept byte-identical; only comments added.
 */
1108 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1111 switch (cmd->flow_type) {
/* TCP/UDP flows report 4-tuple when the L4 hash type is enabled */
1113 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1114 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1115 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1116 cmd->data |= get_ethtool_ipv4_rss(bp);
1119 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1120 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1121 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1124 case AH_ESP_V4_FLOW:
1128 cmd->data |= get_ethtool_ipv4_rss(bp);
1132 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1133 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1134 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1135 cmd->data |= get_ethtool_ipv6_rss(bp);
1138 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1139 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1140 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1143 case AH_ESP_V6_FLOW:
1147 cmd->data |= get_ethtool_ipv6_rss(bp);
/* Shorthand for the two supported RXH field combinations */
1153 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1154 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
/* ETHTOOL_SRXFH: set the RSS hash fields for a flow type. Only exact
 * 4-tuple, 2-tuple or none are accepted; the updated config is applied
 * via a nic close/open cycle if the device is running.
 * NOTE(review): garbled paste -- `tuple = 4/2/0; else return -EINVAL;`
 * lines, several case labels and `return rc;` are missing.
 */
1156 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1158 u32 rss_hash_cfg = bp->rss_hash_cfg;
/* Translate the RXH bitmask into a tuple width (4, 2 or 0) */
1161 if (cmd->data == RXH_4TUPLE)
1163 else if (cmd->data == RXH_2TUPLE)
1165 else if (!cmd->data)
/* TCP/UDP flow types toggle their dedicated L4 hash bits; UDP 4-tuple
 * additionally requires the BNXT_FLAG_UDP_RSS_CAP capability.
 */
1170 if (cmd->flow_type == TCP_V4_FLOW) {
1171 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1173 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1174 } else if (cmd->flow_type == UDP_V4_FLOW) {
1175 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
1177 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1179 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1180 } else if (cmd->flow_type == TCP_V6_FLOW) {
1181 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1183 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1184 } else if (cmd->flow_type == UDP_V6_FLOW) {
1185 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
1187 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1189 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
/* 4-tuple is meaningless for non-TCP/UDP flow types */
1190 } else if (tuple == 4) {
/* Remaining flow types toggle the generic IPv4/IPv6 hash bits */
1194 switch (cmd->flow_type) {
1198 case AH_ESP_V4_FLOW:
1203 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1205 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1211 case AH_ESP_V6_FLOW:
1216 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1218 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
/* No-op if nothing changed; otherwise restart to apply */
1222 if (bp->rss_hash_cfg == rss_hash_cfg)
1225 bp->rss_hash_cfg = rss_hash_cfg;
1226 if (netif_running(bp->dev)) {
1227 bnxt_close_nic(bp, false, false);
1228 rc = bnxt_open_nic(bp, false, false);
/* ethtool_ops::get_rxnfc dispatcher.
 * NOTE(review): garbled paste -- the `switch (cmd->cmd)` head, `break`s
 * and `return rc;` are missing; kept byte-identical.
 */
1233 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1236 struct bnxt *bp = netdev_priv(dev);
/* ntuple-filter queries are only compiled with RFS support */
1240 #ifdef CONFIG_RFS_ACCEL
1241 case ETHTOOL_GRXRINGS:
1242 cmd->data = bp->rx_nr_rings;
1245 case ETHTOOL_GRXCLSRLCNT:
1246 cmd->rule_cnt = bp->ntp_fltr_count;
1247 cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1250 case ETHTOOL_GRXCLSRLALL:
1251 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1254 case ETHTOOL_GRXCLSRULE:
1255 rc = bnxt_grxclsrule(bp, cmd);
/* ETHTOOL_GRXFH falls through to the hash-field query */
1260 rc = bnxt_grxfh(bp, cmd);
/* bnxt_set_rxnfc() - ethtool .set_rxnfc handler; forwards the RSS
 * hash-field set request to bnxt_srxfh(). (Dispatch switch elided here.)
 */
1271 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1273 struct bnxt *bp = netdev_priv(dev);
1278 rc = bnxt_srxfh(bp, cmd);
/* bnxt_get_rxfh_indir_size() - size of the RSS indirection table.
 * P5 chips use a table padded to a multiple of BNXT_RSS_TABLE_ENTRIES_P5;
 * older chips use the fixed hardware hash-index size.
 */
1288 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1290 struct bnxt *bp = netdev_priv(dev);
1292 if (bp->flags & BNXT_FLAG_CHIP_P5)
1293 return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
1294 return HW_HASH_INDEX_SIZE;
/* bnxt_get_rxfh_key_size() - RSS hash key length (fixed by hardware). */
1297 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1299 return HW_HASH_KEY_SIZE;
/* bnxt_get_rxfh() - report RSS indirection table, hash key and hash function.
 * Hash function is always Toeplitz (ETH_RSS_HASH_TOP); table and key are
 * copied from the default VNIC when present.
 */
1302 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1305 struct bnxt *bp = netdev_priv(dev);
1306 struct bnxt_vnic_info *vnic;
1310 *hfunc = ETH_RSS_HASH_TOP;
1315 vnic = &bp->vnic_info[0];
1316 if (indir && bp->rss_indir_tbl) {
1317 tbl_size = bnxt_get_rxfh_indir_size(dev);
1318 for (i = 0; i < tbl_size; i++)
1319 indir[i] = bp->rss_indir_tbl[i];
1322 if (key && vnic->rss_hash_key)
1323 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
/* bnxt_set_rxfh() - program a new RSS indirection table.
 * Only the Toeplitz hash function is accepted. Entries beyond the reported
 * table size are zeroed; if the NIC is running it is bounced to apply the
 * new table. (Key programming is not visible in this extract.)
 */
1328 static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
1329 const u8 *key, const u8 hfunc)
1331 struct bnxt *bp = netdev_priv(dev);
1334 if (hfunc && hfunc != ETH_RSS_HASH_TOP)
1341 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1343 for (i = 0; i < tbl_size; i++)
1344 bp->rss_indir_tbl[i] = indir[i];
/* Zero the padding entries between tbl_size and the allocated table end */
1345 pad = bp->rss_indir_tbl_entries - tbl_size;
1347 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1350 if (netif_running(bp->dev)) {
1351 bnxt_close_nic(bp, false, false);
1352 rc = bnxt_open_nic(bp, false, false);
/* bnxt_get_drvinfo() - fill ethtool driver info: driver name, firmware
 * version string, PCI bus info, stat/test counts. EEPROM and register
 * dump lengths are left at 0 pending firmware support (see TODOs).
 */
1357 static void bnxt_get_drvinfo(struct net_device *dev,
1358 struct ethtool_drvinfo *info)
1360 struct bnxt *bp = netdev_priv(dev);
1362 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1363 strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1364 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1365 info->n_stats = bnxt_get_num_stats(bp);
1366 info->testinfo_len = bp->num_tests;
1367 /* TODO CHIMP_FW: eeprom dump details */
1368 info->eedump_len = 0;
1369 /* TODO CHIMP FW: reg dump details */
1370 info->regdump_len = 0;
/* bnxt_get_regs_len() - register dump size: fixed PXP register window,
 * plus PCIe statistics when the firmware supports them.
 */
1373 static int bnxt_get_regs_len(struct net_device *dev)
1375 struct bnxt *bp = netdev_priv(dev);
1381 reg_len = BNXT_PXP_REG_LEN;
1383 if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1384 reg_len += sizeof(struct pcie_ctx_hw_stats);
/* bnxt_get_regs() - ethtool register dump.
 * Reads the PXP register block via debug HWRM register read, then (if the
 * firmware is capable) issues HWRM_PCIE_QSTATS into a DMA slice and copies
 * the little-endian stats into the dump buffer after the PXP block.
 */
1389 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1392 struct pcie_ctx_hw_stats *hw_pcie_stats;
1393 struct hwrm_pcie_qstats_input *req;
1394 struct bnxt *bp = netdev_priv(dev);
1395 dma_addr_t hw_pcie_stats_addr;
1399 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
1401 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
1404 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
1407 hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
1408 &hw_pcie_stats_addr);
1409 if (!hw_pcie_stats) {
1410 hwrm_req_drop(bp, req);
1415 hwrm_req_hold(bp, req); /* hold on to slice */
1416 req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
1417 req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
1418 rc = hwrm_req_send(bp, req);
1420 __le64 *src = (__le64 *)hw_pcie_stats;
1421 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
/* Byte-swap each 64-bit stat from firmware LE format to host order */
1424 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
1425 dst[i] = le64_to_cpu(src[i]);
1427 hwrm_req_drop(bp, req);
/* bnxt_get_wol() - report Wake-on-LAN capability; only magic-packet wake
 * is supported, and only when the device advertises WOL capability.
 */
1430 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1432 struct bnxt *bp = netdev_priv(dev);
1436 memset(&wol->sopass, 0, sizeof(wol->sopass));
1437 if (bp->flags & BNXT_FLAG_WOL_CAP) {
1438 wol->supported = WAKE_MAGIC;
1440 wol->wolopts = WAKE_MAGIC;
/* bnxt_set_wol() - enable/disable magic-packet Wake-on-LAN by allocating
 * or freeing the firmware WOL filter. Any other wake option is rejected.
 */
1444 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1446 struct bnxt *bp = netdev_priv(dev);
1448 if (wol->wolopts & ~WAKE_MAGIC)
1451 if (wol->wolopts & WAKE_MAGIC) {
1452 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1455 if (bnxt_hwrm_alloc_wol_fltr(bp))
1461 if (bnxt_hwrm_free_wol_fltr(bp))
/* _bnxt_fw_to_ethtool_adv_spds() - convert firmware speed/pause bits to a
 * legacy ethtool ADVERTISED_* bitmask. Pause mapping follows the standard
 * asymmetric-pause encoding (Pause / Asym_Pause combinations).
 */
1469 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1473 /* TODO: support 25GB, 40GB, 50GB with different cable type */
1474 /* set the advertised speeds */
1475 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1476 speed_mask |= ADVERTISED_100baseT_Full;
1477 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1478 speed_mask |= ADVERTISED_1000baseT_Full;
1479 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1480 speed_mask |= ADVERTISED_2500baseX_Full;
1481 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1482 speed_mask |= ADVERTISED_10000baseT_Full;
1483 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1484 speed_mask |= ADVERTISED_40000baseCR4_Full;
1486 if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1487 speed_mask |= ADVERTISED_Pause;
1488 else if (fw_pause & BNXT_LINK_PAUSE_TX)
1489 speed_mask |= ADVERTISED_Asym_Pause;
1490 else if (fw_pause & BNXT_LINK_PAUSE_RX)
1491 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
/* BNXT_FW_TO_ETHTOOL_SPDS() - expand firmware NRZ speed mask and pause bits
 * into link-mode bits of the given ksettings field ("supported",
 * "advertising", ...). Macro (not a function) because the link-mode name is
 * pasted into ethtool_link_ksettings_add_link_mode().
 */
1496 #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
1498 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
1499 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1501 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
1502 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1504 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
1505 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1507 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
1508 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1509 25000baseCR_Full); \
1510 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
1511 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1512 40000baseCR4_Full);\
1513 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
1514 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1515 50000baseCR2_Full);\
1516 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
1517 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1518 100000baseCR4_Full);\
1519 if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
1520 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1522 if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
1523 ethtool_link_ksettings_add_link_mode( \
1524 lk_ksettings, name, Asym_Pause);\
1525 } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
1526 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
/* BNXT_ETHTOOL_TO_FW_SPDS() - inverse of BNXT_FW_TO_ETHTOOL_SPDS: test
 * link-mode bits in a ksettings field and accumulate the matching firmware
 * NRZ speed-mask bits into fw_speeds.
 */
1531 #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
1533 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1535 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1537 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
1538 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1539 1000baseT_Full) || \
1540 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1542 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
1543 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1545 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
1546 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1547 25000baseCR_Full)) \
1548 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
1549 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1550 40000baseCR4_Full)) \
1551 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
1552 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1553 50000baseCR2_Full)) \
1554 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
1555 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1556 100000baseCR4_Full)) \
1557 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
/* BNXT_FW_TO_ETHTOOL_PAM4_SPDS() - expand firmware PAM4 speed-mask bits
 * (50G/100G/200G single-/dual-/quad-lane) into ksettings link-mode bits.
 */
1560 #define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
1562 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \
1563 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1564 50000baseCR_Full); \
1565 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \
1566 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1567 100000baseCR2_Full);\
1568 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \
1569 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1570 200000baseCR4_Full);\
/* BNXT_ETHTOOL_TO_FW_PAM4_SPDS() - inverse mapping: ksettings PAM4
 * link-mode bits back to firmware PAM4 speed-mask bits.
 */
1573 #define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
1575 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1576 50000baseCR_Full)) \
1577 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \
1578 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1579 100000baseCR2_Full)) \
1580 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \
1581 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1582 200000baseCR4_Full)) \
1583 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \
/* bnxt_fw_to_ethtool_advertised_fec() - translate the firmware FEC config
 * into advertised FEC link modes. When FEC is off or not autonegotiated,
 * only FEC_NONE is advertised; otherwise each enabled encoding
 * (Base-R / RS / LLRS) is set.
 */
1586 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
1587 struct ethtool_link_ksettings *lk_ksettings)
1589 u16 fec_cfg = link_info->fec_cfg;
1591 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
1592 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1593 lk_ksettings->link_modes.advertising);
1596 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1597 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1598 lk_ksettings->link_modes.advertising);
1599 if (fec_cfg & BNXT_FEC_ENC_RS)
1600 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1601 lk_ksettings->link_modes.advertising);
1602 if (fec_cfg & BNXT_FEC_ENC_LLRS)
1603 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1604 lk_ksettings->link_modes.advertising);
/* bnxt_fw_to_ethtool_advertised_spds() - populate the "advertising"
 * link modes from the firmware NRZ and PAM4 advertised speed masks, the
 * autoneg pause setting, and the advertised FEC modes.
 */
1607 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1608 struct ethtool_link_ksettings *lk_ksettings)
1610 u16 fw_speeds = link_info->advertising;
1613 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1614 fw_pause = link_info->auto_pause_setting;
1616 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1617 fw_speeds = link_info->advertising_pam4;
1618 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
1619 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
/* bnxt_fw_to_ethtool_lp_adv() - populate "lp_advertising" (link-partner
 * advertised) modes from the firmware's reported partner NRZ/PAM4 speeds
 * and pause bits.
 */
1622 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1623 struct ethtool_link_ksettings *lk_ksettings)
1625 u16 fw_speeds = link_info->lp_auto_link_speeds;
1628 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1629 fw_pause = link_info->lp_pause;
1631 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1633 fw_speeds = link_info->lp_auto_pam4_link_speeds;
1634 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
/* bnxt_fw_to_ethtool_support_fec() - translate firmware FEC capability
 * bits into "supported" FEC link modes (NONE / Base-R / RS / LLRS).
 */
1637 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
1638 struct ethtool_link_ksettings *lk_ksettings)
1640 u16 fec_cfg = link_info->fec_cfg;
1642 if (fec_cfg & BNXT_FEC_NONE) {
1643 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1644 lk_ksettings->link_modes.supported);
1647 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
1648 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1649 lk_ksettings->link_modes.supported);
1650 if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
1651 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1652 lk_ksettings->link_modes.supported);
1653 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
1654 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1655 lk_ksettings->link_modes.supported);
/* bnxt_fw_to_ethtool_support_spds() - populate "supported" link modes from
 * firmware NRZ and PAM4 capability masks; Pause is always supported, and
 * Autoneg is added if any auto speeds are available.
 */
1658 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1659 struct ethtool_link_ksettings *lk_ksettings)
1661 u16 fw_speeds = link_info->support_speeds;
1663 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1664 fw_speeds = link_info->support_pam4_speeds;
1665 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
1667 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
1668 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1671 if (link_info->support_auto_speeds ||
1672 link_info->support_pam4_auto_speeds)
1673 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1675 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
/* bnxt_fw_to_ethtool_speed() - map a firmware link-speed enum to the
 * ethtool SPEED_* value; SPEED_UNKNOWN for unrecognized values.
 * (Individual return statements are elided in this extract.)
 */
1678 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1680 switch (fw_link_speed) {
1681 case BNXT_LINK_SPEED_100MB:
1683 case BNXT_LINK_SPEED_1GB:
1685 case BNXT_LINK_SPEED_2_5GB:
1687 case BNXT_LINK_SPEED_10GB:
1689 case BNXT_LINK_SPEED_20GB:
1691 case BNXT_LINK_SPEED_25GB:
1693 case BNXT_LINK_SPEED_40GB:
1695 case BNXT_LINK_SPEED_50GB:
1697 case BNXT_LINK_SPEED_100GB:
1698 return SPEED_100000;
1700 return SPEED_UNKNOWN;
/* bnxt_get_link_ksettings() - ethtool .get_link_ksettings handler.
 * Under bp->link_lock: reports supported/advertised/partner link modes,
 * autoneg state, current or requested speed and duplex, and media/port
 * type (TP / DA / fibre) derived from the firmware media type.
 */
1704 static int bnxt_get_link_ksettings(struct net_device *dev,
1705 struct ethtool_link_ksettings *lk_ksettings)
1707 struct bnxt *bp = netdev_priv(dev);
1708 struct bnxt_link_info *link_info = &bp->link_info;
1709 struct ethtool_link_settings *base = &lk_ksettings->base;
1712 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1713 mutex_lock(&bp->link_lock);
1714 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1716 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
1717 if (link_info->autoneg) {
1718 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
1719 ethtool_link_ksettings_add_link_mode(lk_ksettings,
1720 advertising, Autoneg);
1721 base->autoneg = AUTONEG_ENABLE;
1722 base->duplex = DUPLEX_UNKNOWN;
/* Link-partner info is only meaningful when the link is up */
1723 if (link_info->phy_link_status == BNXT_LINK_LINK) {
1724 bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
1725 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1726 base->duplex = DUPLEX_FULL;
1728 base->duplex = DUPLEX_HALF;
1730 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
1732 base->autoneg = AUTONEG_DISABLE;
1734 bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
1735 base->duplex = DUPLEX_HALF;
1736 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
1737 base->duplex = DUPLEX_FULL;
1739 base->speed = ethtool_speed;
1741 base->port = PORT_NONE;
1742 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1743 base->port = PORT_TP;
1744 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1746 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1749 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1751 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1754 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
1755 base->port = PORT_DA;
1756 else if (link_info->media_type ==
1757 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
1758 base->port = PORT_FIBRE;
1760 base->phy_address = link_info->phy_addr;
1761 mutex_unlock(&bp->link_lock);
/* bnxt_force_link_speed() - request a fixed (non-autoneg) link speed.
 * Maps the ethtool speed to a firmware force-speed value, preferring NRZ
 * signaling and falling back to PAM4 for 50G/100G/200G where only PAM4 is
 * supported. Records the request in link_info and clears autoneg state.
 * NOTE(review): break statements and the -EALREADY short-circuit return
 * are elided in this extract.
 */
1766 static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
1768 struct bnxt *bp = netdev_priv(dev);
1769 struct bnxt_link_info *link_info = &bp->link_info;
1770 u16 support_pam4_spds = link_info->support_pam4_speeds;
1771 u16 support_spds = link_info->support_speeds;
1772 u8 sig_mode = BNXT_SIG_MODE_NRZ;
1775 switch (ethtool_speed) {
1777 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1778 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
1781 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1782 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
1785 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1786 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
1789 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1790 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
1793 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1794 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
1797 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1798 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
1801 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1802 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
1805 if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
1806 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
1807 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
1808 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
1809 sig_mode = BNXT_SIG_MODE_PAM4;
1813 if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
1814 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
1815 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
1816 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
1817 sig_mode = BNXT_SIG_MODE_PAM4;
1821 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
1822 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
1823 sig_mode = BNXT_SIG_MODE_PAM4;
1829 netdev_err(dev, "unsupported speed!\n");
/* Requested speed/signaling already in force with autoneg off */
1833 if (link_info->req_link_speed == fw_speed &&
1834 link_info->req_signal_mode == sig_mode &&
1835 link_info->autoneg == 0)
1838 link_info->req_link_speed = fw_speed;
1839 link_info->req_signal_mode = sig_mode;
1840 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
1841 link_info->autoneg = 0;
1842 link_info->advertising = 0;
1843 link_info->advertising_pam4 = 0;
/* bnxt_get_fw_auto_link_speeds() - convert legacy ethtool ADVERTISED_*
 * bits into a firmware autoneg speed mask (100M/1G/10G/40G only).
 */
1848 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1850 u16 fw_speed_mask = 0;
1852 /* only support autoneg at speed 100, 1000, and 10000 */
1853 if (advertising & (ADVERTISED_100baseT_Full |
1854 ADVERTISED_100baseT_Half)) {
1855 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1857 if (advertising & (ADVERTISED_1000baseT_Full |
1858 ADVERTISED_1000baseT_Half)) {
1859 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1861 if (advertising & ADVERTISED_10000baseT_Full)
1862 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1864 if (advertising & ADVERTISED_40000baseCR4_Full)
1865 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1867 return fw_speed_mask;
/* bnxt_set_link_ksettings() - ethtool .set_link_ksettings handler.
 * With autoneg enabled: translate advertised modes into firmware NRZ/PAM4
 * masks (defaulting to all supported auto speeds when none requested).
 * With autoneg disabled: reject PHYs/media that require autoneg and half
 * duplex, then force the requested speed. Applies via HWRM when the
 * interface is running. All under bp->link_lock.
 */
1870 static int bnxt_set_link_ksettings(struct net_device *dev,
1871 const struct ethtool_link_ksettings *lk_ksettings)
1873 struct bnxt *bp = netdev_priv(dev);
1874 struct bnxt_link_info *link_info = &bp->link_info;
1875 const struct ethtool_link_settings *base = &lk_ksettings->base;
1876 bool set_pause = false;
1880 if (!BNXT_PHY_CFG_ABLE(bp))
1883 mutex_lock(&bp->link_lock);
1884 if (base->autoneg == AUTONEG_ENABLE) {
1885 link_info->advertising = 0;
1886 link_info->advertising_pam4 = 0;
1887 BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
1889 BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
1890 lk_ksettings, advertising);
1891 link_info->autoneg |= BNXT_AUTONEG_SPEED;
/* No explicit modes requested: advertise everything supported */
1892 if (!link_info->advertising && !link_info->advertising_pam4) {
1893 link_info->advertising = link_info->support_auto_speeds;
1894 link_info->advertising_pam4 =
1895 link_info->support_pam4_auto_speeds;
1897 /* any change to autoneg will cause link change, therefore the
1898 * driver should put back the original pause setting in autoneg
1902 u8 phy_type = link_info->phy_type;
1904 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
1905 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
1906 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1907 netdev_err(dev, "10GBase-T devices must autoneg\n");
1909 goto set_setting_exit;
1911 if (base->duplex == DUPLEX_HALF) {
1912 netdev_err(dev, "HALF DUPLEX is not supported!\n");
1914 goto set_setting_exit;
1916 speed = base->speed;
1917 rc = bnxt_force_link_speed(dev, speed);
1919 if (rc == -EALREADY)
1921 goto set_setting_exit;
1925 if (netif_running(dev))
1926 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1929 mutex_unlock(&bp->link_lock);
/* bnxt_get_fecparam() - ethtool .get_fecparam handler.
 * Reports configured FEC modes from link_info->fec_cfg and the currently
 * active FEC decoded from active_fec_sig_mode (Clause 74 -> BASER,
 * Clause 91 / RS544 -> RS, RS272 -> LLRS).
 */
1933 static int bnxt_get_fecparam(struct net_device *dev,
1934 struct ethtool_fecparam *fec)
1936 struct bnxt *bp = netdev_priv(dev);
1937 struct bnxt_link_info *link_info;
1941 link_info = &bp->link_info;
1942 fec_cfg = link_info->fec_cfg;
1943 active_fec = link_info->active_fec_sig_mode &
1944 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
1945 if (fec_cfg & BNXT_FEC_NONE) {
1946 fec->fec = ETHTOOL_FEC_NONE;
1947 fec->active_fec = ETHTOOL_FEC_NONE;
1950 if (fec_cfg & BNXT_FEC_AUTONEG)
1951 fec->fec |= ETHTOOL_FEC_AUTO;
1952 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1953 fec->fec |= ETHTOOL_FEC_BASER;
1954 if (fec_cfg & BNXT_FEC_ENC_RS)
1955 fec->fec |= ETHTOOL_FEC_RS;
1956 if (fec_cfg & BNXT_FEC_ENC_LLRS)
1957 fec->fec |= ETHTOOL_FEC_LLRS;
1959 switch (active_fec) {
1960 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
1961 fec->active_fec |= ETHTOOL_FEC_BASER;
1963 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
1964 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
1965 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
1966 fec->active_fec |= ETHTOOL_FEC_RS;
1968 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
1969 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
1970 fec->active_fec |= ETHTOOL_FEC_LLRS;
/* bnxt_get_fec_stats() - report FEC corrected-bit count from extended
 * port statistics; skipped for VFs or when extended stats are unavailable.
 */
1976 static void bnxt_get_fec_stats(struct net_device *dev,
1977 struct ethtool_fec_stats *fec_stats)
1979 struct bnxt *bp = netdev_priv(dev);
1982 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
1985 rx = bp->rx_port_stats_ext.sw_stats;
1986 fec_stats->corrected_bits.total =
1987 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
/* bnxt_ethtool_forced_fec_to_fw() - build the PORT_PHY_CFG flags for a
 * forced (non-autoneg) FEC selection; priority order is BASER > RS > LLRS.
 */
1990 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
1993 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
1995 if (fec & ETHTOOL_FEC_BASER)
1996 fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
1997 else if (fec & ETHTOOL_FEC_RS)
1998 fw_fec |= BNXT_FEC_RS_ON(link_info);
1999 else if (fec & ETHTOOL_FEC_LLRS)
2000 fw_fec |= BNXT_FEC_LLRS_ON;
/* bnxt_set_fecparam() - ethtool .set_fecparam handler.
 * Validates the requested FEC modes against firmware capabilities, builds
 * the PORT_PHY_CFG flags (OFF / AUTO / forced), sends the request with a
 * PHY reset, and refreshes cached link state on success.
 */
2004 static int bnxt_set_fecparam(struct net_device *dev,
2005 struct ethtool_fecparam *fecparam)
2007 struct hwrm_port_phy_cfg_input *req;
2008 struct bnxt *bp = netdev_priv(dev);
2009 struct bnxt_link_info *link_info;
2010 u32 new_cfg, fec = fecparam->fec;
2014 link_info = &bp->link_info;
2015 fec_cfg = link_info->fec_cfg;
2016 if (fec_cfg & BNXT_FEC_NONE)
2019 if (fec & ETHTOOL_FEC_OFF) {
2020 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
2021 BNXT_FEC_ALL_OFF(link_info);
/* Reject any requested mode the firmware does not advertise as capable */
2024 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
2025 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
2026 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
2027 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
2030 if (fec & ETHTOOL_FEC_AUTO) {
2031 if (!link_info->autoneg)
2033 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
2035 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
2039 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
2042 req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2043 rc = hwrm_req_send(bp, req);
2044 /* update current settings */
2046 mutex_lock(&bp->link_lock);
2047 bnxt_update_link(bp, false);
2048 mutex_unlock(&bp->link_lock);
/* bnxt_get_pauseparam() - report flow-control autoneg state and the
 * requested RX/TX pause settings from cached link_info.
 */
2053 static void bnxt_get_pauseparam(struct net_device *dev,
2054 struct ethtool_pauseparam *epause)
2056 struct bnxt *bp = netdev_priv(dev);
2057 struct bnxt_link_info *link_info = &bp->link_info;
2061 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2062 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2063 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
/* bnxt_get_pause_stats() - report RX/TX pause frame counters from port
 * statistics; skipped for VFs or when port stats are unavailable.
 */
2066 static void bnxt_get_pause_stats(struct net_device *dev,
2067 struct ethtool_pause_stats *epstat)
2069 struct bnxt *bp = netdev_priv(dev);
2072 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
2075 rx = bp->port_stats.sw_stats;
/* TX counters live after the RX block; offset is in bytes, array is u64 */
2076 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
2078 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
2079 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
/* bnxt_set_pauseparam() - ethtool .set_pauseparam handler.
 * Autoneg pause requires speed autoneg to be on. Transitioning from auto
 * to forced pause forces a link change. Applies via HWRM when running.
 * All under bp->link_lock.
 */
2082 static int bnxt_set_pauseparam(struct net_device *dev,
2083 struct ethtool_pauseparam *epause)
2086 struct bnxt *bp = netdev_priv(dev);
2087 struct bnxt_link_info *link_info = &bp->link_info;
2089 if (!BNXT_PHY_CFG_ABLE(bp))
2092 mutex_lock(&bp->link_lock);
2093 if (epause->autoneg) {
2094 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2099 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
2100 if (bp->hwrm_spec_code >= 0x10201)
2101 link_info->req_flow_ctrl =
2102 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
2104 /* when transition from auto pause to force pause,
2105 * force a link change
2107 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2108 link_info->force_link_chng = true;
2109 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
2110 link_info->req_flow_ctrl = 0;
2112 if (epause->rx_pause)
2113 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
2115 if (epause->tx_pause)
2116 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
2118 if (netif_running(dev))
2119 rc = bnxt_hwrm_set_pause(bp);
2122 mutex_unlock(&bp->link_lock);
/* bnxt_get_link() - ethtool .get_link: cached link-up state. */
2126 static u32 bnxt_get_link(struct net_device *dev)
2128 struct bnxt *bp = netdev_priv(dev);
2130 /* TODO: handle MF, VF, driver close case */
2131 return bp->link_info.link_up;
/* bnxt_hwrm_nvm_get_dev_info() - query NVM device info from firmware and
 * copy the full HWRM response into the caller-provided structure.
 */
2134 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
2135 struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
2137 struct hwrm_nvm_get_dev_info_output *resp;
2138 struct hwrm_nvm_get_dev_info_input *req;
2144 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
2148 resp = hwrm_req_hold(bp, req);
2149 rc = hwrm_req_send(bp, req);
2151 memcpy(nvm_dev_info, resp, sizeof(*resp));
2152 hwrm_req_drop(bp, req);
/* bnxt_print_admin_err() - log that the PF lacks flash/reset privileges. */
2156 static void bnxt_print_admin_err(struct bnxt *bp)
2158 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
/* Forward declaration: NVRAM directory-entry lookup (defined later). */
2161 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2162 u16 ext, u16 *index, u32 *item_length,
/* bnxt_flash_nvram() - write one NVRAM directory entry via HWRM_NVM_WRITE.
 * Payload (if any) is copied into a DMA slice of the request; uses the
 * maximum HWRM timeout since NVM writes are slow. Logs an admin-privilege
 * hint on permission failure.
 */
2165 static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
2166 u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
2167 u32 dir_item_len, const u8 *data,
2170 struct bnxt *bp = netdev_priv(dev);
2171 struct hwrm_nvm_write_input *req;
2174 rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
2178 if (data_len && data) {
2179 dma_addr_t dma_handle;
2182 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
2184 hwrm_req_drop(bp, req);
2188 req->dir_data_length = cpu_to_le32(data_len);
2190 memcpy(kmem, data, data_len);
2191 req->host_src_addr = cpu_to_le64(dma_handle);
2194 hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
2195 req->dir_type = cpu_to_le16(dir_type);
2196 req->dir_ordinal = cpu_to_le16(dir_ordinal);
2197 req->dir_ext = cpu_to_le16(dir_ext);
2198 req->dir_attr = cpu_to_le16(dir_attr);
2199 req->dir_item_length = cpu_to_le32(dir_item_len);
2200 rc = hwrm_req_send(bp, req);
2203 bnxt_print_admin_err(bp);
/* bnxt_hwrm_firmware_reset() - issue HWRM_FW_RESET for a given embedded
 * processor. Refused if the firmware denies reset (e.g. inhibited by a
 * remote driver). AP resets are sent silently (expected to fail on some
 * configurations); other failures log the admin-privilege hint.
 */
2207 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
2208 u8 self_reset, u8 flags)
2210 struct bnxt *bp = netdev_priv(dev);
2211 struct hwrm_fw_reset_input *req;
2214 if (!bnxt_hwrm_reset_permitted(bp)) {
2215 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
2219 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
2223 req->embedded_proc_type = proc_type;
2224 req->selfrst_status = self_reset;
2227 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
2228 rc = hwrm_req_send_silent(bp, req);
2230 rc = hwrm_req_send(bp, req);
2232 bnxt_print_admin_err(bp);
/* bnxt_firmware_reset() - reset the embedded processor that owns a given
 * NVRAM directory type after flashing it. Boot code and ChiMP/APE images
 * self-reset on the next PCIe reset; KONG/BONO reset immediately.
 * NOTE(review): switch keyword and break statements are elided here.
 */
2237 static int bnxt_firmware_reset(struct net_device *dev,
2238 enum bnxt_nvm_directory_type dir_type)
2240 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
2241 u8 proc_type, flags = 0;
2243 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
2244 /* (e.g. when firmware isn't already running) */
2246 case BNX_DIR_TYPE_CHIMP_PATCH:
2247 case BNX_DIR_TYPE_BOOTCODE:
2248 case BNX_DIR_TYPE_BOOTCODE_2:
2249 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
2250 /* Self-reset ChiMP upon next PCIe reset: */
2251 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2253 case BNX_DIR_TYPE_APE_FW:
2254 case BNX_DIR_TYPE_APE_PATCH:
2255 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
2256 /* Self-reset APE upon next PCIe reset: */
2257 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2259 case BNX_DIR_TYPE_KONG_FW:
2260 case BNX_DIR_TYPE_KONG_PATCH:
2261 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
2263 case BNX_DIR_TYPE_BONO_FW:
2264 case BNX_DIR_TYPE_BONO_PATCH:
2265 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
2271 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
/* bnxt_firmware_reset_chip() - full chip reset (self-reset ASAP), using a
 * graceful reset when the firmware supports hot reset.
 */
2274 static int bnxt_firmware_reset_chip(struct net_device *dev)
2276 struct bnxt *bp = netdev_priv(dev);
2279 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
2280 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
2282 return bnxt_hwrm_firmware_reset(dev,
2283 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
2284 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
/* bnxt_firmware_reset_ap() - reset only the application processor. */
2288 static int bnxt_firmware_reset_ap(struct net_device *dev)
2290 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
2291 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
/* bnxt_flash_firmware() - validate and flash an APE-binary-format firmware
 * image. Checks: directory type -> expected code type, minimum size,
 * header signature, code type, device family, and trailing CRC32. On
 * success the image is written to NVRAM and the owning processor is reset.
 */
2295 static int bnxt_flash_firmware(struct net_device *dev,
2304 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
2307 case BNX_DIR_TYPE_BOOTCODE:
2308 case BNX_DIR_TYPE_BOOTCODE_2:
2309 code_type = CODE_BOOT;
2311 case BNX_DIR_TYPE_CHIMP_PATCH:
2312 code_type = CODE_CHIMP_PATCH;
2314 case BNX_DIR_TYPE_APE_FW:
2315 code_type = CODE_MCTP_PASSTHRU;
2317 case BNX_DIR_TYPE_APE_PATCH:
2318 code_type = CODE_APE_PATCH;
2320 case BNX_DIR_TYPE_KONG_FW:
2321 code_type = CODE_KONG_FW;
2323 case BNX_DIR_TYPE_KONG_PATCH:
2324 code_type = CODE_KONG_PATCH;
2326 case BNX_DIR_TYPE_BONO_FW:
2327 code_type = CODE_BONO_FW;
2329 case BNX_DIR_TYPE_BONO_PATCH:
2330 code_type = CODE_BONO_PATCH;
2333 netdev_err(dev, "Unsupported directory entry type: %u\n",
2337 if (fw_size < sizeof(struct bnxt_fw_header)) {
2338 netdev_err(dev, "Invalid firmware file size: %u\n",
2339 (unsigned int)fw_size);
2342 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
2343 netdev_err(dev, "Invalid firmware signature: %08X\n",
2344 le32_to_cpu(header->signature));
2347 if (header->code_type != code_type) {
2348 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
2349 code_type, header->code_type);
2352 if (header->device != DEVICE_CUMULUS_FAMILY) {
2353 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
2354 DEVICE_CUMULUS_FAMILY, header->device);
2357 /* Confirm the CRC32 checksum of the file: */
2358 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2359 sizeof(stored_crc)));
2360 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2361 if (calculated_crc != stored_crc) {
2362 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
2363 (unsigned long)stored_crc,
2364 (unsigned long)calculated_crc);
2367 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2368 0, 0, 0, fw_data, fw_size);
2369 if (rc == 0) /* Firmware update successful */
2370 rc = bnxt_firmware_reset(dev, dir_type);
/* bnxt_flash_microcode() - validate and flash a microcode image that uses
 * the trailer format: checks minimum size, trailer signature, directory
 * type match, trailer length, and trailing CRC32 before writing to NVRAM.
 * Unlike bnxt_flash_firmware(), no processor reset follows.
 */
2375 static int bnxt_flash_microcode(struct net_device *dev,
2380 struct bnxt_ucode_trailer *trailer;
2385 if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
2386 netdev_err(dev, "Invalid microcode file size: %u\n",
2387 (unsigned int)fw_size);
2390 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
2392 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
2393 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
2394 le32_to_cpu(trailer->sig));
2397 if (le16_to_cpu(trailer->dir_type) != dir_type) {
2398 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
2399 dir_type, le16_to_cpu(trailer->dir_type));
2402 if (le16_to_cpu(trailer->trailer_length) <
2403 sizeof(struct bnxt_ucode_trailer)) {
2404 netdev_err(dev, "Invalid microcode trailer length: %d\n",
2405 le16_to_cpu(trailer->trailer_length));
2409 /* Confirm the CRC32 checksum of the file: */
2410 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2411 sizeof(stored_crc)));
2412 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2413 if (calculated_crc != stored_crc) {
2415 "CRC32 (%08lX) does not match calculated: %08lX\n",
2416 (unsigned long)stored_crc,
2417 (unsigned long)calculated_crc);
2420 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2421 0, 0, 0, fw_data, fw_size);
/* bnxt_dir_type_is_ape_bin_format() - true for NVRAM directory types whose
 * images carry the APE binary header (flashed via bnxt_flash_firmware()).
 * (return statements elided in this extract.)
 */
2426 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2429 case BNX_DIR_TYPE_CHIMP_PATCH:
2430 case BNX_DIR_TYPE_BOOTCODE:
2431 case BNX_DIR_TYPE_BOOTCODE_2:
2432 case BNX_DIR_TYPE_APE_FW:
2433 case BNX_DIR_TYPE_APE_PATCH:
2434 case BNX_DIR_TYPE_KONG_FW:
2435 case BNX_DIR_TYPE_KONG_PATCH:
2436 case BNX_DIR_TYPE_BONO_FW:
2437 case BNX_DIR_TYPE_BONO_PATCH:
/* bnxt_dir_type_is_other_exec_format() - true for executable NVRAM entry
 * types that use the microcode-trailer format instead of the APE header
 * (flashed via bnxt_flash_microcode()).
 */
2444 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
2447 case BNX_DIR_TYPE_AVS:
2448 case BNX_DIR_TYPE_EXP_ROM_MBA:
2449 case BNX_DIR_TYPE_PCIE:
2450 case BNX_DIR_TYPE_TSCF_UCODE:
2451 case BNX_DIR_TYPE_EXT_PHY:
2452 case BNX_DIR_TYPE_CCM:
2453 case BNX_DIR_TYPE_ISCSI_BOOT:
2454 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2455 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
/* True if @dir_type is any executable image type (APE-binary or other-exec);
 * executable types may not be written through the raw set_eeprom path.
 */
2462 static bool bnxt_dir_type_is_executable(u16 dir_type)
2464 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2465 bnxt_dir_type_is_other_exec_format(dir_type);
/* Load a firmware image file via request_firmware() and flash it into the
 * NVM directory entry @dir_type, dispatching on the image format:
 *   - APE-binary types  -> bnxt_flash_firmware()
 *   - other executables -> bnxt_flash_microcode()
 *   - everything else   -> raw bnxt_flash_nvram()
 * The firmware object is released before returning.
 */
2468 static int bnxt_flash_firmware_from_file(struct net_device *dev,
2470 const char *filename)
2472 const struct firmware *fw;
2475 rc = request_firmware(&fw, filename, &dev->dev);
2477 netdev_err(dev, "Error %d requesting firmware file: %s\n",
2481 if (bnxt_dir_type_is_ape_bin_format(dir_type))
2482 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
2483 else if (bnxt_dir_type_is_other_exec_format(dir_type))
2484 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
2486 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2487 0, 0, 0, fw->data, fw->size);
2488 release_firmware(fw);
/* Package-flash constants: preferred DMA buffer size for NVM_MODIFY batches,
 * and the HWRM batch-mode flags marking intermediate vs. final chunks.
 */
2492 #define BNXT_PKG_DMA_SIZE 0x40000
2493 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
2494 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
/* Flash a complete firmware package (already loaded into @fw) into NVRAM.
 *
 * Flow: sync fw clock, prepare an NVM_MODIFY request with as large a DMA
 * slice as possible (large buffers avoid excessive NVRAM erases on older
 * fw), locate the BNX_DIR_TYPE_UPDATE staging area, copy the package in
 * modify_len-sized chunks using batch-mode flags, then issue
 * NVM_INSTALL_UPDATE.  On a fragmentation error the install is retried
 * once with ALLOWED_TO_DEFRAG; if fw reports NO_SPACE the UPDATE
 * directory is re-created and the whole flash is retried exactly once
 * (defrag_attempted guards the loop).
 *
 * Returns 0 on success or a negative errno / HWRM error.
 * (NOTE: review — several lines, including error-exit checks and the
 * install-flags assignment, appear elided from this extract.)
 */
2496 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
2499 struct hwrm_nvm_install_update_input *install;
2500 struct hwrm_nvm_install_update_output *resp;
2501 struct hwrm_nvm_modify_input *modify;
2502 struct bnxt *bp = netdev_priv(dev);
2503 bool defrag_attempted = false;
2504 dma_addr_t dma_handle;
2511 bnxt_hwrm_fw_set_time(bp);
2513 rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
2517 /* Try allocating a large DMA buffer first. Older fw will
2518 * cause excessive NVRAM erases when using small blocks.
2520 modify_len = roundup_pow_of_two(fw->size);
2521 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
2523 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
2524 if (!kmem && modify_len > PAGE_SIZE)
2530 hwrm_req_drop(bp, modify);
2534 rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
2536 hwrm_req_drop(bp, modify);
/* Package flashing can take a long time; use the maximum HWRM timeout. */
2540 hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
2541 hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
2543 hwrm_req_hold(bp, modify);
2544 modify->host_src_addr = cpu_to_le64(dma_handle);
2546 resp = hwrm_req_hold(bp, install);
/* install_type may be encoded in the upper 16 bits. */
2547 if ((install_type & 0xffff) == 0)
2548 install_type >>= 16;
2549 install->install_type = cpu_to_le32(install_type);
2552 u32 copied = 0, len = modify_len;
2554 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
2555 BNX_DIR_ORDINAL_FIRST,
2557 &index, &item_len, NULL);
2559 netdev_err(dev, "PKG update area not created in nvram\n");
2562 if (fw->size > item_len) {
2563 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
2564 (unsigned long)fw->size);
2569 modify->dir_idx = cpu_to_le16(index);
/* Multi-chunk transfers are marked with BATCH_MODE / BATCH_LAST flags. */
2571 if (fw->size > modify_len)
2572 modify->flags = BNXT_NVM_MORE_FLAG;
2573 while (copied < fw->size) {
2574 u32 balance = fw->size - copied;
2576 if (balance <= modify_len) {
2579 modify->flags |= BNXT_NVM_LAST_FLAG;
2581 memcpy(kmem, fw->data + copied, len);
2582 modify->len = cpu_to_le32(len);
2583 modify->offset = cpu_to_le32(copied);
2584 rc = hwrm_req_send(bp, modify);
2590 rc = hwrm_req_send_silent(bp, install);
2592 if (defrag_attempted) {
2593 /* We have tried to defragment already in the previous
2594 * iteration. Return with the result for INSTALL_UPDATE
/* First failure due to fragmentation: retry install with defrag allowed. */
2599 if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
2600 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
2602 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
2604 rc = hwrm_req_send_silent(bp, install);
2606 if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
2607 NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
2608 /* FW has cleared NVM area, driver will create
2609 * UPDATE directory and try the flash again
2611 defrag_attempted = true;
2613 rc = bnxt_flash_nvram(bp->dev,
2614 BNX_DIR_TYPE_UPDATE,
2615 BNX_DIR_ORDINAL_FIRST,
2616 0, 0, item_len, NULL, 0);
2618 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
2621 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
2623 } while (defrag_attempted && !rc);
2626 hwrm_req_drop(bp, modify);
2627 hwrm_req_drop(bp, install);
2630 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
2631 (s8)resp->result, (int)resp->problem_item);
2635 bnxt_print_admin_err(bp);
/* Load a firmware package file by name and flash it via
 * bnxt_flash_package_from_fw_obj(); releases the firmware object when done.
 */
2639 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
2642 const struct firmware *fw;
2645 rc = request_firmware(&fw, filename, &dev->dev);
2647 netdev_err(dev, "PKG error %d requesting file: %s\n",
2652 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
2654 release_firmware(fw);
/* ethtool .flash_device entry point.  Rejects VFs; region ALL_REGIONS or a
 * value above 0xffff selects whole-package flashing, otherwise the region is
 * treated as a single NVM directory type.
 */
2659 static int bnxt_flash_device(struct net_device *dev,
2660 struct ethtool_flash *flash)
2662 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2663 netdev_err(dev, "flashdev not supported from a virtual function\n");
2667 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2668 flash->region > 0xffff)
2669 return bnxt_flash_package_from_file(dev, flash->data,
2672 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
/* Query the NVM directory header via HWRM_NVM_GET_DIR_INFO, returning the
 * number of directory entries and the size of each entry.
 */
2675 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
2677 struct hwrm_nvm_get_dir_info_output *output;
2678 struct hwrm_nvm_get_dir_info_input *req;
2679 struct bnxt *bp = netdev_priv(dev);
2682 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
2686 output = hwrm_req_hold(bp, req);
2687 rc = hwrm_req_send(bp, req);
2689 *entries = le32_to_cpu(output->entries);
2690 *length = le32_to_cpu(output->entry_length);
2692 hwrm_req_drop(bp, req);
/* ethtool .get_eeprom_len.  (NOTE: review — the PF/VF check and return
 * statements appear elided from this extract; the comment below documents
 * the -1 convention used for the PF case.)
 */
2696 static int bnxt_get_eeprom_len(struct net_device *dev)
2698 struct bnxt *bp = netdev_priv(dev);
2703 /* The -1 return value allows the entire 32-bit range of offsets to be
2704 * passed via the ethtool command-line utility.
/* Copy the NVM directory listing into @data (up to @len bytes).  The first
 * two bytes carry the entry count and per-entry size; the remainder is the
 * raw directory table DMA'd from firmware via HWRM_NVM_GET_DIR_ENTRIES.
 * Unwritten tail bytes are pre-filled with 0xff.
 */
2709 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
2711 struct bnxt *bp = netdev_priv(dev);
2717 dma_addr_t dma_handle;
2718 struct hwrm_nvm_get_dir_entries_input *req;
2720 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
2724 if (!dir_entries || !entry_length)
2727 /* Insert 2 bytes of directory info (count and size of entries) */
2731 *data++ = dir_entries;
2732 *data++ = entry_length;
2734 memset(data, 0xff, len);
2736 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
2740 buflen = dir_entries * entry_length;
2741 buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
2743 hwrm_req_drop(bp, req);
2746 req->host_dest_addr = cpu_to_le64(dma_handle);
2748 hwrm_req_hold(bp, req); /* hold the slice */
2749 rc = hwrm_req_send(bp, req);
/* Copy no more than the caller's buffer or the DMA'd table, whichever is smaller. */
2751 memcpy(data, buf, len > buflen ? buflen : len);
2752 hwrm_req_drop(bp, req);
/* Read @length bytes at @offset from NVM directory entry @index into @data
 * using HWRM_NVM_READ with a DMA slice as the destination buffer.
 */
2756 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
2757 u32 length, u8 *data)
2759 struct bnxt *bp = netdev_priv(dev);
2762 dma_addr_t dma_handle;
2763 struct hwrm_nvm_read_input *req;
2768 rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
2772 buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
2774 hwrm_req_drop(bp, req);
2778 req->host_dest_addr = cpu_to_le64(dma_handle);
2779 req->dir_idx = cpu_to_le16(index);
2780 req->offset = cpu_to_le32(offset);
2781 req->len = cpu_to_le32(length);
2783 hwrm_req_hold(bp, req); /* hold the slice */
2784 rc = hwrm_req_send(bp, req);
2786 memcpy(data, buf, length);
2787 hwrm_req_drop(bp, req);
/* Locate an NVM directory entry by (type, ordinal, ext) via
 * HWRM_NVM_FIND_DIR_ENTRY.  On success fills *index and, when non-NULL,
 * *item_length and *data_length.  Sent silently since "not found" is an
 * expected outcome for some callers.
 */
2791 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2792 u16 ext, u16 *index, u32 *item_length,
2795 struct hwrm_nvm_find_dir_entry_output *output;
2796 struct hwrm_nvm_find_dir_entry_input *req;
2797 struct bnxt *bp = netdev_priv(dev);
2800 rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
2806 req->dir_type = cpu_to_le16(type);
2807 req->dir_ordinal = cpu_to_le16(ordinal);
2808 req->dir_ext = cpu_to_le16(ext);
2809 req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
2810 output = hwrm_req_hold(bp, req);
2811 rc = hwrm_req_send_silent(bp, req);
2814 *index = le16_to_cpu(output->dir_idx);
2816 *item_length = le32_to_cpu(output->dir_item_length);
2818 *data_length = le32_to_cpu(output->dir_data_length);
2820 hwrm_req_drop(bp, req);
/* Scan the tab-separated, newline-terminated package log in @data for field
 * number @desired_field of the last record, returning a pointer into @data
 * (which is modified in place: the final '\n' is NUL-terminated) or NULL.
 * (NOTE: review — field-counting and return lines appear elided from this
 * extract.)
 */
2824 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
2826 char *retval = NULL;
2833 /* null-terminate the log data (removing last '\n'): */
2834 data[datalen - 1] = 0;
2835 for (p = data; *p != 0; p++) {
2838 while (*p != 0 && *p != '\n') {
2840 while (*p != 0 && *p != '\t' && *p != '\n')
2842 if (field == desired_field)
/* Read the package version string from the BNX_DIR_TYPE_PKG_LOG NVM item
 * into @ver (at most @size bytes).  The log item is read into a temporary
 * kzalloc'd buffer and parsed with bnxt_parse_pkglog(); only a version
 * beginning with a digit is accepted.
 */
2857 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
2859 struct bnxt *bp = netdev_priv(dev);
2866 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2867 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2868 &index, NULL, &pkglen);
2872 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2874 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
2879 rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
2883 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
2885 if (pkgver && *pkgver != 0 && isdigit(*pkgver))
2886 strscpy(ver, pkgver, size);
/* Append the NVM package version (if readable) to bp->fw_ver_str so that
 * ethtool -i reports both firmware and package versions.
 */
2896 static void bnxt_get_pkgver(struct net_device *dev)
2898 struct bnxt *bp = netdev_priv(dev);
2899 char buf[FW_VER_STR_LEN];
2902 if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
2903 len = strlen(bp->fw_ver_str);
2904 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
/* ethtool .get_eeprom.  Offset 0 is a special value returning the NVM
 * directory listing; otherwise the top byte of the offset encodes the
 * 1-based directory index and the low 24 bits the byte offset within it.
 */
2909 static int bnxt_get_eeprom(struct net_device *dev,
2910 struct ethtool_eeprom *eeprom,
2916 if (eeprom->offset == 0) /* special offset value to get directory */
2917 return bnxt_get_nvram_directory(dev, eeprom->len, data);
2919 index = eeprom->offset >> 24;
2920 offset = eeprom->offset & 0xffffff;
2923 netdev_err(dev, "unsupported index value: %d\n", index);
2927 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
/* Erase NVM directory entry @index via HWRM_NVM_ERASE_DIR_ENTRY. */
2930 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
2932 struct hwrm_nvm_erase_dir_entry_input *req;
2933 struct bnxt *bp = netdev_priv(dev);
2936 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
2940 req->dir_idx = cpu_to_le16(index);
2941 return hwrm_req_send(bp, req);
/* ethtool .set_eeprom.  VFs are rejected.  magic type 0xffff selects
 * directory operations (the magic's middle byte is the op, e.g. 0x0e =
 * erase, with offset == ~magic as a sanity check); otherwise the magic and
 * offset encode (type, ext, ordinal, attr) for a raw NVM item write.
 * Executable types cannot be written through this path.
 */
2944 static int bnxt_set_eeprom(struct net_device *dev,
2945 struct ethtool_eeprom *eeprom,
2948 struct bnxt *bp = netdev_priv(dev);
2950 u16 type, ext, ordinal, attr;
2953 netdev_err(dev, "NVM write not supported from a virtual function\n");
2957 type = eeprom->magic >> 16;
2959 if (type == 0xffff) { /* special value for directory operations */
2960 index = eeprom->magic & 0xff;
2961 dir_op = eeprom->magic >> 8;
2965 case 0x0e: /* erase */
2966 if (eeprom->offset != ~eeprom->magic)
2968 return bnxt_erase_nvram_directory(dev, index - 1);
2974 /* Create or re-write an NVM item: */
2975 if (bnxt_dir_type_is_executable(type))
2977 ext = eeprom->magic & 0xffff;
2978 ordinal = eeprom->offset >> 16;
2979 attr = eeprom->offset & 0xffff;
2981 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
/* ethtool .set_eee.  Requires a configurable PHY with EEE capability and
 * autoneg enabled.  Validates the LPI timer against the fw-advertised
 * [lpi_tmr_lo, lpi_tmr_hi] range and the advertised EEE speeds against the
 * current autoneg advertisement, then pushes the settings to firmware when
 * the interface is up.  All link state is accessed under bp->link_lock.
 */
2985 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2987 struct bnxt *bp = netdev_priv(dev);
2988 struct ethtool_eee *eee = &bp->eee;
2989 struct bnxt_link_info *link_info = &bp->link_info;
2993 if (!BNXT_PHY_CFG_ABLE(bp))
2996 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
2999 mutex_lock(&bp->link_lock);
3000 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
3001 if (!edata->eee_enabled)
3004 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
3005 netdev_warn(dev, "EEE requires autoneg\n");
3009 if (edata->tx_lpi_enabled) {
3010 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
3011 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
3012 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
3013 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
3016 } else if (!bp->lpi_tmr_hi) {
/* Timer not configurable; preserve the previously stored value. */
3017 edata->tx_lpi_timer = eee->tx_lpi_timer;
3020 if (!edata->advertised) {
3021 edata->advertised = advertising & eee->supported;
3022 } else if (edata->advertised & ~advertising) {
3023 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
3024 edata->advertised, advertising);
3029 eee->advertised = edata->advertised;
3030 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
3031 eee->tx_lpi_timer = edata->tx_lpi_timer;
3033 eee->eee_enabled = edata->eee_enabled;
3035 if (netif_running(dev))
3036 rc = bnxt_hwrm_set_link_setting(bp, false, true);
3039 mutex_unlock(&bp->link_lock);
/* ethtool .get_eee.  Reports the cached EEE state from bp->eee; when EEE is
 * disabled only tx_lpi_timer is preserved so re-enabling restores the last
 * value, and lp_advertised is zeroed when EEE is not active.
 */
3043 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
3045 struct bnxt *bp = netdev_priv(dev);
3047 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3051 if (!bp->eee.eee_enabled) {
3052 /* Preserve tx_lpi_timer so that the last value will be used
3053 * by default when it is re-enabled.
3055 edata->advertised = 0;
3056 edata->tx_lpi_enabled = 0;
3059 if (!bp->eee.eee_active)
3060 edata->lp_advertised = 0;
/* Read @data_length bytes from an SFP/QSFP module EEPROM page via
 * HWRM_PORT_PHY_I2C_READ, looping in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes.  PAGE_OFFSET is only enabled in the
 * request when the effective offset is non-zero.
 */
3065 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
3066 u16 page_number, u16 start_addr,
3067 u16 data_length, u8 *buf)
3069 struct hwrm_port_phy_i2c_read_output *output;
3070 struct hwrm_port_phy_i2c_read_input *req;
3071 int rc, byte_offset = 0;
3073 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
3077 output = hwrm_req_hold(bp, req);
3078 req->i2c_slave_addr = i2c_addr;
3079 req->page_number = cpu_to_le16(page_number);
3080 req->port_id = cpu_to_le16(bp->pf.port_id);
3084 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
3085 data_length -= xfer_size;
3086 req->page_offset = cpu_to_le16(start_addr + byte_offset);
3087 req->data_length = xfer_size;
3088 req->enables = cpu_to_le32(start_addr + byte_offset ?
3089 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
3090 rc = hwrm_req_send(bp, req);
3092 memcpy(buf + byte_offset, output->data, xfer_size);
3093 byte_offset += xfer_size;
3094 } while (!rc && data_length > 0);
3095 hwrm_req_drop(bp, req);
/* ethtool .get_module_info.  Reads the SFF module ID byte and the
 * diagnostics-support byte from the A0 page to classify the transceiver:
 * SFP -> SFF-8472 (A0-only length if diagnostics unsupported),
 * QSFP/QSFP+ -> SFF-8436, QSFP28 -> SFF-8636.  Bails out early when the
 * module is absent/powered down or firmware predates the feature.
 */
3100 static int bnxt_get_module_info(struct net_device *dev,
3101 struct ethtool_modinfo *modinfo)
3103 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
3104 struct bnxt *bp = netdev_priv(dev);
3107 /* No point in going further if phy status indicates
3108 * module is not inserted or if it is powered down or
3109 * if it is of type 10GBase-T
3111 if (bp->link_info.module_status >
3112 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3115 /* This feature is not supported in older firmware versions */
3116 if (bp->hwrm_spec_code < 0x10202)
3119 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
3120 SFF_DIAG_SUPPORT_OFFSET + 1,
3123 u8 module_id = data[0];
3124 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
3126 switch (module_id) {
3127 case SFF_MODULE_ID_SFP:
3128 modinfo->type = ETH_MODULE_SFF_8472;
3129 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3130 if (!diag_supported)
3131 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3133 case SFF_MODULE_ID_QSFP:
3134 case SFF_MODULE_ID_QSFP_PLUS:
3135 modinfo->type = ETH_MODULE_SFF_8436;
3136 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3138 case SFF_MODULE_ID_QSFP28:
3139 modinfo->type = ETH_MODULE_SFF_8636;
3140 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
/* ethtool .get_module_eeprom.  Reads the requested window from the module
 * EEPROM: offsets below ETH_MODULE_SFF_8436_LEN come from the A0 page, any
 * remainder from the A2 (diagnostics) page, rebasing the offset for A2.
 */
3150 static int bnxt_get_module_eeprom(struct net_device *dev,
3151 struct ethtool_eeprom *eeprom,
3154 struct bnxt *bp = netdev_priv(dev);
3155 u16 start = eeprom->offset, length = eeprom->len;
3158 memset(data, 0, eeprom->len);
3160 /* Read A0 portion of the EEPROM */
3161 if (start < ETH_MODULE_SFF_8436_LEN) {
3162 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
3163 length = ETH_MODULE_SFF_8436_LEN - start;
3164 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
3165 start, length, data);
3170 length = eeprom->len - length;
3173 /* Read A2 portion of the EEPROM */
3175 start -= ETH_MODULE_SFF_8436_LEN;
3176 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
3177 start, length, data);
/* ethtool .nway_reset: restart autonegotiation.  Requires a configurable
 * PHY with autoneg enabled; only takes effect while the interface is up.
 */
3182 static int bnxt_nway_reset(struct net_device *dev)
3186 struct bnxt *bp = netdev_priv(dev);
3187 struct bnxt_link_info *link_info = &bp->link_info;
3189 if (!BNXT_PHY_CFG_ABLE(bp))
3192 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
3195 if (netif_running(dev))
3196 rc = bnxt_hwrm_set_link_setting(bp, true, false);
/* ethtool .set_phys_id: blink (ACTIVE) or restore (INACTIVE) the port LEDs
 * via HWRM_PORT_LED_CFG, programming every LED reported in bp->leds with a
 * 500 ms alternate-blink pattern.
 * NOTE(review): the INACTIVE branch uses the LED1_STATE_DEFAULT constant
 * while all other fields use LED0_* — presumably the enum values are
 * identical across LED indices; confirm against bnxt_hsi.h.
 */
3201 static int bnxt_set_phys_id(struct net_device *dev,
3202 enum ethtool_phys_id_state state)
3204 struct hwrm_port_led_cfg_input *req;
3205 struct bnxt *bp = netdev_priv(dev);
3206 struct bnxt_pf_info *pf = &bp->pf;
3207 struct bnxt_led_cfg *led_cfg;
3212 if (!bp->num_leds || BNXT_VF(bp))
3215 if (state == ETHTOOL_ID_ACTIVE) {
3216 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
3217 duration = cpu_to_le16(500);
3218 } else if (state == ETHTOOL_ID_INACTIVE) {
3219 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
3220 duration = cpu_to_le16(0);
3224 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
3228 req->port_id = cpu_to_le16(pf->port_id);
3229 req->num_leds = bp->num_leds;
3230 led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
3231 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3232 req->enables |= BNXT_LED_DFLT_ENABLES(i);
3233 led_cfg->led_id = bp->leds[i].led_id;
3234 led_cfg->led_state = led_state;
3235 led_cfg->led_blink_on = duration;
3236 led_cfg->led_blink_off = duration;
3237 led_cfg->led_group_id = bp->leds[i].led_group_id;
3239 return hwrm_req_send(bp, req);
/* Fire a firmware-generated test interrupt on completion ring @cmpl_ring
 * via HWRM_SELFTEST_IRQ.
 */
3242 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
3244 struct hwrm_selftest_irq_input *req;
3247 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
3251 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3252 return hwrm_req_send(bp, req);
/* Run the IRQ self-test across every completion ring; returns the first
 * failing ring's error code (error-exit lines elided from this extract).
 */
3255 static int bnxt_test_irq(struct bnxt *bp)
3259 for (i = 0; i < bp->cp_nr_rings; i++) {
3260 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
3263 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
/* Enable or disable internal MAC loopback via HWRM_PORT_MAC_CFG. */
3270 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
3272 struct hwrm_port_mac_cfg_input *req;
3275 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
3279 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
3281 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
3283 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
3284 return hwrm_req_send(bp, req);
/* Query the PHY's supported forced-mode speed bitmap via
 * HWRM_PORT_PHY_QCAPS, used to pick a loopback speed when autoneg must be
 * disabled.
 */
3287 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
3289 struct hwrm_port_phy_qcaps_output *resp;
3290 struct hwrm_port_phy_qcaps_input *req;
3293 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
3297 resp = hwrm_req_hold(bp, req);
3298 rc = hwrm_req_send(bp, req);
3300 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
3302 hwrm_req_drop(bp, req);
/* For PHY loopback on hardware that cannot loop back with autoneg on:
 * force a fixed link speed (the current speed if link is up, otherwise the
 * highest supported of 10/25/40/50G, defaulting to 1G) with a PHY reset,
 * then clear force_link_speed in @req so the caller's subsequent loopback
 * config does not re-force it.  No-op when autoneg is off or the PHY
 * supports loopback with autoneg (BNXT_PHY_FL_AN_PHY_LPBK).
 */
3306 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
3307 struct hwrm_port_phy_cfg_input *req)
3309 struct bnxt_link_info *link_info = &bp->link_info;
3314 if (!link_info->autoneg ||
3315 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
3318 rc = bnxt_query_force_speeds(bp, &fw_advertising);
3322 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
3323 if (bp->link_info.link_up)
3324 fw_speed = bp->link_info.link_speed;
3325 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
3326 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
3327 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
3328 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
3329 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
3330 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
3331 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
3332 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
3334 req->force_link_speed = cpu_to_le16(fw_speed);
3335 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
3336 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
3337 rc = hwrm_req_send(bp, req);
3339 req->force_link_speed = cpu_to_le16(0);
/* Configure PHY loopback via HWRM_PORT_PHY_CFG: external loopback when
 * @ext, internal when @enable, none otherwise.  When enabling, autoneg is
 * first forced off if required by bnxt_disable_an_for_lpbk(); the request
 * is held so that helper can reuse it.
 */
3343 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
3345 struct hwrm_port_phy_cfg_input *req;
3348 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
3352 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
3353 hwrm_req_hold(bp, req);
3356 bnxt_disable_an_for_lpbk(bp, req);
3358 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
3360 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
3362 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
3364 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
3365 rc = hwrm_req_send(bp, req);
3366 hwrm_req_drop(bp, req);
/* Validate a looped-back RX completion: locate the RX buffer via the
 * completion's opaque cookie and verify the packet length, the destination
 * MAC, and the incrementing byte pattern written by bnxt_run_loopback().
 */
3370 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3371 u32 raw_cons, int pkt_size)
3373 struct bnxt_napi *bnapi = cpr->bnapi;
3374 struct bnxt_rx_ring_info *rxr;
3375 struct bnxt_sw_rx_bd *rx_buf;
3376 struct rx_cmp *rxcmp;
3382 rxr = bnapi->rx_ring;
3383 cp_cons = RING_CMP(raw_cons);
3384 rxcmp = (struct rx_cmp *)
3385 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3386 cons = rxcmp->rx_cmp_opaque;
3387 rx_buf = &rxr->rx_buf_ring[cons];
3388 data = rx_buf->data_ptr;
3389 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
3390 if (len != pkt_size)
3393 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
3396 for ( ; i < pkt_size; i++) {
3397 if (data[i] != (u8)(i & 0xff))
/* Poll the completion ring (up to 200 iterations) for the loopback
 * packet's completions.  An RX_L2 completion is verified with
 * bnxt_rx_loopback() and consumes two descriptors; the updated consumer
 * index is written back to cpr->cp_raw_cons.
 */
3403 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3406 struct tx_cmp *txcmp;
3412 raw_cons = cpr->cp_raw_cons;
3413 for (i = 0; i < 200; i++) {
3414 cons = RING_CMP(raw_cons);
3415 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3417 if (!TX_CMP_VALID(txcmp, raw_cons)) {
3422 /* The valid test of the entry must be done first before
3423 * reading any further.
3426 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
3427 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
3428 raw_cons = NEXT_RAW_CMP(raw_cons);
3429 raw_cons = NEXT_RAW_CMP(raw_cons);
3432 raw_cons = NEXT_RAW_CMP(raw_cons);
3434 cpr->cp_raw_cons = raw_cons;
/* Transmit one self-addressed test frame (broadcast header overwritten at
 * some offset with the device MAC, then an incrementing byte pattern) on TX
 * ring 0 and poll RX ring 0's completion ring for the looped-back copy.
 * On P5 chips the RX completion ring is looked up via cp_ring_arr.
 * Returns 0 if the packet came back intact.
 */
3438 static int bnxt_run_loopback(struct bnxt *bp)
3440 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
3441 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
3442 struct bnxt_cp_ring_info *cpr;
3443 int pkt_size, i = 0;
3444 struct sk_buff *skb;
3449 cpr = &rxr->bnapi->cp_ring;
3450 if (bp->flags & BNXT_FLAG_CHIP_P5)
3451 cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
/* Keep the packet small enough to be received in the copy buffer. */
3452 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
3453 skb = netdev_alloc_skb(bp->dev, pkt_size);
3456 data = skb_put(skb, pkt_size);
3457 eth_broadcast_addr(data);
3459 ether_addr_copy(&data[i], bp->dev->dev_addr);
3461 for ( ; i < pkt_size; i++)
3462 data[i] = (u8)(i & 0xff);
3464 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
3466 if (dma_mapping_error(&bp->pdev->dev, map)) {
3470 bnxt_xmit_bd(bp, txr, map, pkt_size);
3472 /* Sync BD data before updating doorbell */
3475 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
3476 rc = bnxt_poll_loopback(bp, cpr, pkt_size);
3478 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
/* Execute the firmware self-tests selected by @test_mask via
 * HWRM_SELFTEST_EXEC (using the firmware-advertised timeout) and return the
 * per-test pass bitmap in *test_results.
 */
3483 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
3485 struct hwrm_selftest_exec_output *resp;
3486 struct hwrm_selftest_exec_input *req;
3489 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
3493 hwrm_req_timeout(bp, req, bp->test_info->timeout);
3494 req->flags = test_mask;
3496 resp = hwrm_req_hold(bp, req);
3497 rc = hwrm_req_send(bp, req);
3498 *test_results = resp->test_success;
3499 hwrm_req_drop(bp, req);
/* The driver appends 4 locally-run tests (MAC/PHY/external loopback, IRQ)
 * after the firmware-reported tests; these macros index them within
 * bp->num_tests.
 */
3503 #define BNXT_DRV_TESTS 4
3504 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
3505 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
3506 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
3507 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
/* ethtool .self_test.  Online tests run the non-offline firmware tests
 * while the NIC stays up.  Offline tests (rejected with active VFs or on a
 * shared PF) close the NIC, run the remaining firmware tests, then perform
 * the driver loopback sequence in half-open state: MAC loopback, PHY
 * loopback, and (when requested and supported) external loopback, followed
 * by a reopen and the IRQ test.  Per-test results go into @buf (non-zero =
 * failed) and failures set ETH_TEST_FL_FAILED.
 * (NOTE: review — several intermediate lines, including some rc checks and
 * result-index assignments, appear elided from this extract.)
 */
3509 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
3512 struct bnxt *bp = netdev_priv(dev);
3513 bool do_ext_lpbk = false;
3514 bool offline = false;
3515 u8 test_results = 0;
3519 if (!bp->num_tests || !BNXT_PF(bp))
3521 memset(buf, 0, sizeof(u64) * bp->num_tests);
3522 if (!netif_running(dev)) {
3523 etest->flags |= ETH_TEST_FL_FAILED;
3527 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
3528 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
3531 if (etest->flags & ETH_TEST_FL_OFFLINE) {
3532 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
3533 etest->flags |= ETH_TEST_FL_FAILED;
3534 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
/* Partition fw tests into the online set (run now) and offline set. */
3540 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3541 u8 bit_val = 1 << i;
3543 if (!(bp->test_info->offline_mask & bit_val))
3544 test_mask |= bit_val;
3546 test_mask |= bit_val;
3549 bnxt_run_fw_tests(bp, test_mask, &test_results);
3551 rc = bnxt_close_nic(bp, false, false);
3554 bnxt_run_fw_tests(bp, test_mask, &test_results);
3556 buf[BNXT_MACLPBK_TEST_IDX] = 1;
3557 bnxt_hwrm_mac_loopback(bp, true);
3559 rc = bnxt_half_open_nic(bp);
3561 bnxt_hwrm_mac_loopback(bp, false);
3562 etest->flags |= ETH_TEST_FL_FAILED;
3565 if (bnxt_run_loopback(bp))
3566 etest->flags |= ETH_TEST_FL_FAILED;
3568 buf[BNXT_MACLPBK_TEST_IDX] = 0;
3570 bnxt_hwrm_mac_loopback(bp, false);
3571 bnxt_hwrm_phy_loopback(bp, true, false);
3573 if (bnxt_run_loopback(bp)) {
3574 buf[BNXT_PHYLPBK_TEST_IDX] = 1;
3575 etest->flags |= ETH_TEST_FL_FAILED;
3578 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
3579 bnxt_hwrm_phy_loopback(bp, true, true);
3581 if (bnxt_run_loopback(bp)) {
3582 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
3583 etest->flags |= ETH_TEST_FL_FAILED;
3586 bnxt_hwrm_phy_loopback(bp, false, false);
3587 bnxt_half_close_nic(bp);
3588 rc = bnxt_open_nic(bp, false, true);
3590 if (rc || bnxt_test_irq(bp)) {
3591 buf[BNXT_IRQ_TEST_IDX] = 1;
3592 etest->flags |= ETH_TEST_FL_FAILED;
/* Translate the fw pass bitmap into per-test result slots. */
3594 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3595 u8 bit_val = 1 << i;
3597 if ((test_mask & bit_val) && !(test_results & bit_val)) {
3599 etest->flags |= ETH_TEST_FL_FAILED;
/* ethtool .reset.  Rejects VFs and (without hot-reset capability) setups
 * with VFs assigned to VMs.  Handles BNXT_FW_RESET_CHIP (whole-chip fw
 * reset, hwrm_spec >= 0x10803; a driver reload is needed afterwards unless
 * hot reset is supported) and BNXT_FW_RESET_AP (application processor
 * reset).  Bits actually performed are cleared from *flags; a request for
 * exactly an unsupported reset returns -EOPNOTSUPP.
 */
3604 static int bnxt_reset(struct net_device *dev, u32 *flags)
3606 struct bnxt *bp = netdev_priv(dev);
3607 bool reload = false;
3614 netdev_err(dev, "Reset is not supported from a VF\n");
3618 if (pci_vfs_assigned(bp->pdev) &&
3619 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
3621 "Reset not allowed when VFs are assigned to VMs\n");
3625 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
3626 /* This feature is not supported in older firmware versions */
3627 if (bp->hwrm_spec_code >= 0x10803) {
3628 if (!bnxt_firmware_reset_chip(dev)) {
3629 netdev_info(dev, "Firmware reset request successful.\n");
3630 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
3632 *flags &= ~BNXT_FW_RESET_CHIP;
3634 } else if (req == BNXT_FW_RESET_CHIP) {
3635 return -EOPNOTSUPP; /* only request, fail hard */
3639 if (req & BNXT_FW_RESET_AP) {
3640 /* This feature is not supported in older firmware versions */
3641 if (bp->hwrm_spec_code >= 0x10803) {
3642 if (!bnxt_firmware_reset_ap(dev)) {
3643 netdev_info(dev, "Reset application processor successful.\n");
3645 *flags &= ~BNXT_FW_RESET_AP;
3647 } else if (req == BNXT_FW_RESET_AP) {
3648 return -EOPNOTSUPP; /* only request, fail hard */
3653 netdev_info(dev, "Reload driver to complete reset\n");
/* ethtool .set_dump: select live (0) or crash (1) dump mode.  Crash dumps
 * require the TEE_BNXT_FW config option.
 */
3658 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
3660 struct bnxt *bp = netdev_priv(dev);
3662 if (dump->flag > BNXT_DUMP_CRASH) {
3663 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
3667 if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
3668 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
3672 bp->dump_flag = dump->flag;
/* ethtool .get_dump_flag: report the current dump mode, the packed HWRM fw
 * version, and the expected coredump length.  Requires hwrm_spec >= 0x10801.
 */
3676 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3678 struct bnxt *bp = netdev_priv(dev);
3680 if (bp->hwrm_spec_code < 0x10801)
3683 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3684 bp->ver_resp.hwrm_fw_min_8b << 16 |
3685 bp->ver_resp.hwrm_fw_bld_8b << 8 |
3686 bp->ver_resp.hwrm_fw_rsvd_8b;
3688 dump->flag = bp->dump_flag;
3689 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
/* ethtool .get_dump_data: zero the caller's buffer and fill it with the
 * coredump for the currently selected dump mode.
 */
3693 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3696 struct bnxt *bp = netdev_priv(dev);
3698 if (bp->hwrm_spec_code < 0x10801)
3701 memset(buf, 0, dump->len);
3703 dump->flag = bp->dump_flag;
3704 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
/* ethtool .get_ts_info: always report software timestamping; when a PTP
 * clock is registered, additionally report hardware timestamping with its
 * clock index, ON/OFF tx types, and the PTPv2 L2/L4 event rx filters.
 */
3707 static int bnxt_get_ts_info(struct net_device *dev,
3708 struct ethtool_ts_info *info)
3710 struct bnxt *bp = netdev_priv(dev);
3711 struct bnxt_ptp_cfg *ptp;
3714 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3715 SOF_TIMESTAMPING_RX_SOFTWARE |
3716 SOF_TIMESTAMPING_SOFTWARE;
3718 info->phc_index = -1;
3722 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
3723 SOF_TIMESTAMPING_RX_HARDWARE |
3724 SOF_TIMESTAMPING_RAW_HARDWARE;
3726 info->phc_index = ptp_clock_index(ptp->ptp_clock);
3728 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
3730 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
3731 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
3732 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* One-time ethtool setup at probe/reset: fetch the NVM package version
 * (unless fw already reports it), then on PFs with hwrm_spec >= 0x10704
 * query the firmware self-test list via HWRM_SELFTEST_QLIST, allocate
 * bp->test_info, record the offline mask and timeout, and build the test
 * name strings — the last BNXT_DRV_TESTS slots get fixed driver-test names,
 * the rest come from the firmware response (32 bytes per name).
 * NOTE(review): strlcpy is deprecated in current kernels; prefer strscpy
 * when this can be changed.
 */
3736 void bnxt_ethtool_init(struct bnxt *bp)
3738 struct hwrm_selftest_qlist_output *resp;
3739 struct hwrm_selftest_qlist_input *req;
3740 struct bnxt_test_info *test_info;
3741 struct net_device *dev = bp->dev;
3744 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
3745 bnxt_get_pkgver(dev);
3748 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
3751 test_info = bp->test_info;
3753 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
3756 bp->test_info = test_info;
3759 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
3762 resp = hwrm_req_hold(bp, req);
3763 rc = hwrm_req_send_silent(bp, req);
3765 goto ethtool_init_exit;
3767 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
3768 if (bp->num_tests > BNXT_MAX_TEST)
3769 bp->num_tests = BNXT_MAX_TEST;
3771 test_info->offline_mask = resp->offline_tests;
3772 test_info->timeout = le16_to_cpu(resp->test_timeout);
3773 if (!test_info->timeout)
3774 test_info->timeout = HWRM_CMD_TIMEOUT;
3775 for (i = 0; i < bp->num_tests; i++) {
3776 char *str = test_info->string[i];
3777 char *fw_str = resp->test0_name + i * 32;
3779 if (i == BNXT_MACLPBK_TEST_IDX) {
3780 strcpy(str, "Mac loopback test (offline)");
3781 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
3782 strcpy(str, "Phy loopback test (offline)");
3783 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
3784 strcpy(str, "Ext loopback test (offline)");
3785 } else if (i == BNXT_IRQ_TEST_IDX) {
3786 strcpy(str, "Interrupt_test (offline)");
3788 strlcpy(str, fw_str, ETH_GSTRING_LEN);
3789 strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
3790 if (test_info->offline_mask & (1 << i))
3791 strncat(str, " (offline)",
3792 ETH_GSTRING_LEN - strlen(str));
3794 strncat(str, " (online)",
3795 ETH_GSTRING_LEN - strlen(str));
3800 hwrm_req_drop(bp, req);
/* ethtool standard PHY stats: report the PCS symbol error counter from the
 * extended RX port stats.  PFs with PORT_STATS_EXT only.
 */
3803 static void bnxt_get_eth_phy_stats(struct net_device *dev,
3804 struct ethtool_eth_phy_stats *phy_stats)
3806 struct bnxt *bp = netdev_priv(dev);
3809 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
3812 rx = bp->rx_port_stats_ext.sw_stats;
3813 phy_stats->SymbolErrorDuringCarrier =
3814 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
/* ethtool standard MAC stats from the port stats block; TX counters live
 * at BNXT_TX_PORT_STATS_BYTE_OFFSET into the same u64 array as RX.
 * PFs with PORT_STATS only.
 */
3817 static void bnxt_get_eth_mac_stats(struct net_device *dev,
3818 struct ethtool_eth_mac_stats *mac_stats)
3820 struct bnxt *bp = netdev_priv(dev);
3823 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
3826 rx = bp->port_stats.sw_stats;
3827 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3829 mac_stats->FramesReceivedOK =
3830 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
3831 mac_stats->FramesTransmittedOK =
3832 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
3833 mac_stats->FrameCheckSequenceErrors =
3834 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
3835 mac_stats->AlignmentErrors =
3836 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
3837 mac_stats->OutOfRangeLengthField =
3838 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
/* ethtool standard MAC control stats: received control frames counter from
 * the port stats block.  PFs with PORT_STATS only.
 */
3841 static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
3842 struct ethtool_eth_ctrl_stats *ctrl_stats)
3844 struct bnxt *bp = netdev_priv(dev);
3847 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
3850 rx = bp->port_stats.sw_stats;
3851 ctrl_stats->MACControlFramesReceived =
3852 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
/* Packet-size bucket boundaries for the RMON histogram counters below;
 * must match the hist[]/hist_tx[] slots filled in bnxt_get_rmon_stats().
 * (NOTE: review — the array's initializer lines appear elided from this
 * extract.)
 */
3855 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
/* ethtool standard RMON stats: jabber/oversize/undersize counters plus the
 * RX and TX per-size histogram buckets (64B up to 9217-16383B) from the
 * port stats block, with *ranges pointing at bnxt_rmon_ranges.
 * PFs with PORT_STATS only.
 */
3869 static void bnxt_get_rmon_stats(struct net_device *dev,
3870 struct ethtool_rmon_stats *rmon_stats,
3871 const struct ethtool_rmon_hist_range **ranges)
3873 struct bnxt *bp = netdev_priv(dev);
3876 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
3879 rx = bp->port_stats.sw_stats;
3880 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3882 rmon_stats->jabbers =
3883 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
3884 rmon_stats->oversize_pkts =
3885 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
3886 rmon_stats->undersize_pkts =
3887 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
3889 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
3890 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
3891 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
3892 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
3893 rmon_stats->hist[4] =
3894 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
3895 rmon_stats->hist[5] =
3896 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
3897 rmon_stats->hist[6] =
3898 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
3899 rmon_stats->hist[7] =
3900 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
3901 rmon_stats->hist[8] =
3902 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
3903 rmon_stats->hist[9] =
3904 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
3906 rmon_stats->hist_tx[0] =
3907 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
3908 rmon_stats->hist_tx[1] =
3909 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
3910 rmon_stats->hist_tx[2] =
3911 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
3912 rmon_stats->hist_tx[3] =
3913 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
3914 rmon_stats->hist_tx[4] =
3915 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
3916 rmon_stats->hist_tx[5] =
3917 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
3918 rmon_stats->hist_tx[6] =
3919 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
3920 rmon_stats->hist_tx[7] =
3921 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
3922 rmon_stats->hist_tx[8] =
3923 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
3924 rmon_stats->hist_tx[9] =
3925 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
3927 *ranges = bnxt_rmon_ranges;
3930 void bnxt_ethtool_free(struct bnxt *bp)
3932 kfree(bp->test_info);
3933 bp->test_info = NULL;
3936 const struct ethtool_ops bnxt_ethtool_ops = {
3937 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3938 ETHTOOL_COALESCE_MAX_FRAMES |
3939 ETHTOOL_COALESCE_USECS_IRQ |
3940 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
3941 ETHTOOL_COALESCE_STATS_BLOCK_USECS |
3942 ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
3943 ETHTOOL_COALESCE_USE_CQE,
3944 .get_link_ksettings = bnxt_get_link_ksettings,
3945 .set_link_ksettings = bnxt_set_link_ksettings,
3946 .get_fec_stats = bnxt_get_fec_stats,
3947 .get_fecparam = bnxt_get_fecparam,
3948 .set_fecparam = bnxt_set_fecparam,
3949 .get_pause_stats = bnxt_get_pause_stats,
3950 .get_pauseparam = bnxt_get_pauseparam,
3951 .set_pauseparam = bnxt_set_pauseparam,
3952 .get_drvinfo = bnxt_get_drvinfo,
3953 .get_regs_len = bnxt_get_regs_len,
3954 .get_regs = bnxt_get_regs,
3955 .get_wol = bnxt_get_wol,
3956 .set_wol = bnxt_set_wol,
3957 .get_coalesce = bnxt_get_coalesce,
3958 .set_coalesce = bnxt_set_coalesce,
3959 .get_msglevel = bnxt_get_msglevel,
3960 .set_msglevel = bnxt_set_msglevel,
3961 .get_sset_count = bnxt_get_sset_count,
3962 .get_strings = bnxt_get_strings,
3963 .get_ethtool_stats = bnxt_get_ethtool_stats,
3964 .set_ringparam = bnxt_set_ringparam,
3965 .get_ringparam = bnxt_get_ringparam,
3966 .get_channels = bnxt_get_channels,
3967 .set_channels = bnxt_set_channels,
3968 .get_rxnfc = bnxt_get_rxnfc,
3969 .set_rxnfc = bnxt_set_rxnfc,
3970 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
3971 .get_rxfh_key_size = bnxt_get_rxfh_key_size,
3972 .get_rxfh = bnxt_get_rxfh,
3973 .set_rxfh = bnxt_set_rxfh,
3974 .flash_device = bnxt_flash_device,
3975 .get_eeprom_len = bnxt_get_eeprom_len,
3976 .get_eeprom = bnxt_get_eeprom,
3977 .set_eeprom = bnxt_set_eeprom,
3978 .get_link = bnxt_get_link,
3979 .get_eee = bnxt_get_eee,
3980 .set_eee = bnxt_set_eee,
3981 .get_module_info = bnxt_get_module_info,
3982 .get_module_eeprom = bnxt_get_module_eeprom,
3983 .nway_reset = bnxt_nway_reset,
3984 .set_phys_id = bnxt_set_phys_id,
3985 .self_test = bnxt_self_test,
3986 .get_ts_info = bnxt_get_ts_info,
3987 .reset = bnxt_reset,
3988 .set_dump = bnxt_set_dump,
3989 .get_dump_flag = bnxt_get_dump_flag,
3990 .get_dump_data = bnxt_get_dump_data,
3991 .get_eth_phy_stats = bnxt_get_eth_phy_stats,
3992 .get_eth_mac_stats = bnxt_get_eth_mac_stats,
3993 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
3994 .get_rmon_stats = bnxt_get_rmon_stats,