1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
6 #include <linux/firmware.h>
7 #include <linux/mdio.h>
12 #include "cxgb4_cudbg.h"
13 #include "cxgb4_filter.h"
14 #include "cxgb4_tc_flower.h"
16 #define EEPROM_MAGIC 0x38E2F10C
/* ethtool .get_msglevel: report the adapter's debug message-enable mask. */
18 static u32 get_msglevel(struct net_device *dev)
20 return netdev2adap(dev)->msg_enable;
/* ethtool .set_msglevel: store the new debug message-enable mask. */
23 static void set_msglevel(struct net_device *dev, u32 val)
25 netdev2adap(dev)->msg_enable = val;
/* Self-test indices reported through ethtool -t; only an offline
 * loopback test is implemented, CXGB4_ETHTOOL_MAX_TEST is the count.
 */
28 enum cxgb4_ethtool_tests {
29 CXGB4_ETHTOOL_LB_TEST,
30 CXGB4_ETHTOOL_MAX_TEST,
/* Names for the self-tests above, in the same order as the enum. */
33 static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
34 "Loop back test (offline)",
/* Human-readable names for flashable regions, indexed by
 * CXGB4_ETHTOOL_FLASH_* (used in cxgb4_ethtool_flash_region below).
 */
37 static const char * const flash_region_strings[] = {
/* Per-port statistics names for ETH_SS_STATS; must stay in the same
 * order as struct port_stats / struct queue_port_stats used in
 * get_stats() (see the comment near struct queue_port_stats).
 */
45 static const char stats_strings[][ETH_GSTRING_LEN] = {
48 "tx_broadcast_frames ",
49 "tx_multicast_frames ",
54 "tx_frames_65_to_127 ",
55 "tx_frames_128_to_255 ",
56 "tx_frames_256_to_511 ",
57 "tx_frames_512_to_1023 ",
58 "tx_frames_1024_to_1518 ",
59 "tx_frames_1519_to_max ",
74 "rx_broadcast_frames ",
75 "rx_multicast_frames ",
78 "rx_frames_too_long ",
86 "rx_frames_65_to_127 ",
87 "rx_frames_128_to_255 ",
88 "rx_frames_256_to_511 ",
89 "rx_frames_512_to_1023 ",
90 "rx_frames_1024_to_1518 ",
91 "rx_frames_1519_to_max ",
103 "rx_bg0_frames_dropped ",
104 "rx_bg1_frames_dropped ",
105 "rx_bg2_frames_dropped ",
106 "rx_bg3_frames_dropped ",
107 "rx_bg0_frames_trunc ",
108 "rx_bg1_frames_trunc ",
109 "rx_bg2_frames_trunc ",
110 "rx_bg3_frames_trunc ",
/* Adapter-wide statistics names; order must match struct adapter_stats.
 * TLS counters are only present when CONFIG_CHELSIO_TLS_DEVICE is set.
 */
122 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
126 "write_coal_success ",
128 #ifdef CONFIG_CHELSIO_TLS_DEVICE
129 "tx_tls_encrypted_packets",
130 "tx_tls_encrypted_bytes ",
133 "tx_tls_skip_no_sync_data",
134 "tx_tls_drop_no_sync_data",
135 "tx_tls_drop_bypass_req ",
/* Loopback statistics names; the first entry is a section header and is
 * excluded from the copy loop in get_stats() (ARRAY_SIZE(...) - 1).
 */
139 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
140 "-------Loopback----------- ",
149 "frames_128_to_255 ",
150 "frames_256_to_511 ",
151 "frames_512_to_1023 ",
152 "frames_1024_to_1518 ",
153 "frames_1519_to_max ",
155 "bg0_frames_dropped ",
156 "bg1_frames_dropped ",
157 "bg2_frames_dropped ",
158 "bg3_frames_dropped ",
/* Private flag names for ETH_SS_PRIV_FLAGS, indexed by PRIV_FLAG_* bit. */
165 static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
166 [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
/* ethtool .get_sset_count: number of strings for each string-set.
 * ETH_SS_STATS covers port + adapter + loopback stats concatenated,
 * matching the copy order in get_strings()/get_stats().
 */
169 static int get_sset_count(struct net_device *dev, int sset)
173 return ARRAY_SIZE(stats_strings) +
174 ARRAY_SIZE(adapter_stats_strings) +
175 ARRAY_SIZE(loopback_stats_strings);
176 case ETH_SS_PRIV_FLAGS:
177 return ARRAY_SIZE(cxgb4_priv_flags_strings);
179 return ARRAY_SIZE(cxgb4_selftest_strings);
/* ethtool .get_regs_len: size of the register dump, as reported by hw. */
185 static int get_regs_len(struct net_device *dev)
187 struct adapter *adap = netdev2adap(dev);
189 return t4_get_regs_len(adap);
/* ethtool .get_eeprom_len (body elided in this view). */
192 static int get_eeprom_len(struct net_device *dev)
/* ethtool .get_drvinfo: fill driver name, bus id, regdump length, and
 * firmware/TP/expansion-ROM versions decoded from the packed FW fields.
 */
197 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
199 struct adapter *adapter = netdev2adap(dev);
202 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
203 strlcpy(info->bus_info, pci_name(adapter->pdev),
204 sizeof(info->bus_info));
205 info->regdump_len = get_regs_len(dev);
/* fw_vers == 0 means no firmware version is known; skip the string. */
207 if (adapter->params.fw_vers)
208 snprintf(info->fw_version, sizeof(info->fw_version),
209 "%u.%u.%u.%u, TP %u.%u.%u.%u",
210 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
211 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
212 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
213 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
214 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
215 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
216 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
217 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
/* Expansion-ROM version is read on demand; only report it on success. */
219 if (!t4_get_exprom_version(adapter, &exprom_vers))
220 snprintf(info->erom_version, sizeof(info->erom_version),
222 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
223 FW_HDR_FW_VER_MINOR_G(exprom_vers),
224 FW_HDR_FW_VER_MICRO_G(exprom_vers),
225 FW_HDR_FW_VER_BUILD_G(exprom_vers));
226 info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
/* ethtool .get_strings: copy out string tables for the requested set.
 * For ETH_SS_STATS the three tables are concatenated in the same order
 * that get_stats() emits values.
 */
229 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
231 if (stringset == ETH_SS_STATS) {
232 memcpy(data, stats_strings, sizeof(stats_strings));
233 data += sizeof(stats_strings);
234 memcpy(data, adapter_stats_strings,
235 sizeof(adapter_stats_strings));
236 data += sizeof(adapter_stats_strings);
237 memcpy(data, loopback_stats_strings,
238 sizeof(loopback_stats_strings));
239 } else if (stringset == ETH_SS_PRIV_FLAGS) {
240 memcpy(data, cxgb4_priv_flags_strings,
241 sizeof(cxgb4_priv_flags_strings));
242 } else if (stringset == ETH_SS_TEST) {
243 memcpy(data, cxgb4_selftest_strings,
244 sizeof(cxgb4_selftest_strings));
248 /* port stats maintained per queue of the port. They should be in the same
249 * order as in stats_strings above.
251 struct queue_port_stats {
/* Adapter-wide software counters; order must match adapter_stats_strings. */
262 struct adapter_stats {
268 #ifdef CONFIG_CHELSIO_TLS_DEVICE
269 u64 tx_tls_encrypted_packets;
270 u64 tx_tls_encrypted_bytes;
273 u64 tx_tls_skip_no_sync_data;
274 u64 tx_tls_drop_no_sync_data;
275 u64 tx_tls_drop_bypass_req;
/* Sum per-queue SGE software counters for all of a port's queue sets
 * into *s. Also folds in the ETHOFLD TX queues when they exist
 * (adap->sge.eohw_txq non-NULL).
 */
279 static void collect_sge_port_stats(const struct adapter *adap,
280 const struct port_info *p,
281 struct queue_port_stats *s)
283 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
284 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
285 struct sge_eohw_txq *eohw_tx;
288 memset(s, 0, sizeof(*s));
/* Walk the port's nqsets contiguous TX/RX queue pairs. */
289 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
292 s->tx_csum += tx->tx_cso;
293 s->rx_csum += rx->stats.rx_cso;
294 s->vlan_ex += rx->stats.vlan_ex;
295 s->vlan_ins += tx->vlan_ins;
296 s->gro_pkts += rx->stats.lro_pkts;
297 s->gro_merged += rx->stats.lro_merged;
/* ETHOFLD TX queues contribute TSO/USO/csum/VLAN counts as well. */
300 if (adap->sge.eohw_txq) {
301 eohw_tx = &adap->sge.eohw_txq[p->first_qset];
302 for (i = 0; i < p->nqsets; i++, eohw_tx++) {
303 s->tso += eohw_tx->tso;
304 s->uso += eohw_tx->uso;
305 s->tx_csum += eohw_tx->tx_cso;
306 s->vlan_ins += eohw_tx->vlan_ins;
/* Gather adapter-wide stats: doorbell FIFO events, and on T5+ the
 * write-combining success count derived from SGE_STAT registers
 * (only meaningful when the stat source selector is 7).
 */
311 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
315 memset(s, 0, sizeof(*s));
317 s->db_drop = adap->db_stats.db_drop;
318 s->db_full = adap->db_stats.db_full;
319 s->db_empty = adap->db_stats.db_empty;
/* T4 lacks these SGE statistics registers; skip on T4. */
321 if (!is_t4(adap->params.chip)) {
324 v = t4_read_reg(adap, SGE_STAT_CFG_A);
325 if (STATSOURCE_T5_G(v) == 7) {
326 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
327 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
/* write-coalescing successes = total - match */
328 s->wc_success = val1 - val2;
/* ethtool .get_ethtool_stats: emit port stats, per-queue SGE stats,
 * adapter stats, then loopback stats — in the exact order of the
 * string tables above. 'data' advances in u64 units.
 */
334 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
337 struct port_info *pi = netdev_priv(dev);
338 struct adapter *adapter = pi->adapter;
339 struct lb_port_stats s;
343 t4_get_port_stats_offset(adapter, pi->tx_chan,
344 (struct port_stats *)data,
347 data += sizeof(struct port_stats) / sizeof(u64);
348 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
349 data += sizeof(struct queue_port_stats) / sizeof(u64);
350 collect_adapter_stats(adapter, (struct adapter_stats *)data);
351 data += sizeof(struct adapter_stats) / sizeof(u64);
/* First loopback "stat" slot carries the port id (section header). */
353 *data++ = (u64)pi->port_id;
354 memset(&s, 0, sizeof(s));
355 t4_get_lb_stats(adapter, pi->port_id, &s);
/* -1: the header entry in loopback_stats_strings has no counter. */
358 for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
359 *data++ = (unsigned long long)*p0++;
/* ethtool .get_regs: dump the adapter register space into 'buf'. */
362 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
365 struct adapter *adap = netdev2adap(dev);
368 buf_size = t4_get_regs_len(adap);
369 regs->version = mk_adap_vers(adap);
370 t4_get_regs(adap, buf, buf_size);
/* ethtool .nway_reset: restart autonegotiation; requires the interface
 * to be up and autoneg to be enabled.
 */
373 static int restart_autoneg(struct net_device *dev)
375 struct port_info *p = netdev_priv(dev);
377 if (!netif_running(dev))
379 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
381 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
/* ethtool .set_phys_id: blink the port LED to identify the device. */
385 static int identify_port(struct net_device *dev,
386 enum ethtool_phys_id_state state)
389 struct adapter *adap = netdev2adap(dev);
391 if (state == ETHTOOL_ID_ACTIVE)
393 else if (state == ETHTOOL_ID_INACTIVE)
398 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
402 * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
403 * @port_type: Firmware Port Type
404 * @mod_type: Firmware Module Type
406 * Translate Firmware Port/Module type to Ethtool Port Type.
408 static int from_fw_port_mod_type(enum fw_port_type port_type,
409 enum fw_port_module_type mod_type)
/* BASE-T copper ports. */
411 if (port_type == FW_PORT_TYPE_BT_SGMII ||
412 port_type == FW_PORT_TYPE_BT_XFI ||
413 port_type == FW_PORT_TYPE_BT_XAUI) {
/* Fixed fiber ports. */
415 } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
416 port_type == FW_PORT_TYPE_FIBER_XAUI) {
/* Pluggable module cages: the ethtool port type depends on the module
 * actually inserted (optical vs. twinax DAC).
 */
418 } else if (port_type == FW_PORT_TYPE_SFP ||
419 port_type == FW_PORT_TYPE_QSFP_10G ||
420 port_type == FW_PORT_TYPE_QSA ||
421 port_type == FW_PORT_TYPE_QSFP ||
422 port_type == FW_PORT_TYPE_CR4_QSFP ||
423 port_type == FW_PORT_TYPE_CR_QSFP ||
424 port_type == FW_PORT_TYPE_CR2_QSFP ||
425 port_type == FW_PORT_TYPE_SFP28) {
426 if (mod_type == FW_PORT_MOD_TYPE_LR ||
427 mod_type == FW_PORT_MOD_TYPE_SR ||
428 mod_type == FW_PORT_MOD_TYPE_ER ||
429 mod_type == FW_PORT_MOD_TYPE_LRM)
431 else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
432 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
/* Backplane (KR/KX) ports. */
436 } else if (port_type == FW_PORT_TYPE_KR4_100G ||
437 port_type == FW_PORT_TYPE_KR_SFP28 ||
438 port_type == FW_PORT_TYPE_KR_XLAUI) {
446 * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
447 * @speed: speed in Kb/s
449 * Translates a specific Port Speed into a Firmware Port Capabilities
452 static unsigned int speed_to_fw_caps(int speed)
455 return FW_PORT_CAP32_SPEED_100M;
457 return FW_PORT_CAP32_SPEED_1G;
459 return FW_PORT_CAP32_SPEED_10G;
461 return FW_PORT_CAP32_SPEED_25G;
463 return FW_PORT_CAP32_SPEED_40G;
465 return FW_PORT_CAP32_SPEED_50G;
467 return FW_PORT_CAP32_SPEED_100G;
469 return FW_PORT_CAP32_SPEED_200G;
471 return FW_PORT_CAP32_SPEED_400G;
476 * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
477 * @port_type: Firmware Port Type
478 * @fw_caps: Firmware Port Capabilities
479 * @link_mode_mask: ethtool Link Mode Mask
481 * Translate a Firmware Port Capabilities specification to an ethtool
484 static void fw_caps_to_lmm(enum fw_port_type port_type,
485 fw_port_cap32_t fw_caps,
486 unsigned long *link_mode_mask)
/* Set a single ETHTOOL_LINK_MODE_* bit in link_mode_mask. */
488 #define SET_LMM(__lmm_name) \
490 __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
/* Set the ethtool link-mode bit only if the FW capability is present. */
494 #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
496 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
497 SET_LMM(__lmm_name); \
/* Per-port-type mapping: each FW port type advertises a specific set of
 * ethtool link modes for the speeds it can support.
 */
501 case FW_PORT_TYPE_BT_SGMII:
502 case FW_PORT_TYPE_BT_XFI:
503 case FW_PORT_TYPE_BT_XAUI:
505 FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
506 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
507 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
510 case FW_PORT_TYPE_KX4:
511 case FW_PORT_TYPE_KX:
513 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
514 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
517 case FW_PORT_TYPE_KR:
519 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
522 case FW_PORT_TYPE_BP_AP:
524 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
525 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
526 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
529 case FW_PORT_TYPE_BP4_AP:
531 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
532 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
533 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
534 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
537 case FW_PORT_TYPE_FIBER_XFI:
538 case FW_PORT_TYPE_FIBER_XAUI:
539 case FW_PORT_TYPE_SFP:
540 case FW_PORT_TYPE_QSFP_10G:
541 case FW_PORT_TYPE_QSA:
543 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
544 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
547 case FW_PORT_TYPE_BP40_BA:
548 case FW_PORT_TYPE_QSFP:
550 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
551 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
552 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
555 case FW_PORT_TYPE_CR_QSFP:
556 case FW_PORT_TYPE_SFP28:
558 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
559 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
560 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
563 case FW_PORT_TYPE_KR_SFP28:
565 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
566 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
567 FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
570 case FW_PORT_TYPE_KR_XLAUI:
572 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
573 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
574 FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
577 case FW_PORT_TYPE_CR2_QSFP:
579 FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
582 case FW_PORT_TYPE_KR4_100G:
583 case FW_PORT_TYPE_CR4_QSFP:
585 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
586 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
587 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
588 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
589 FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
590 FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
/* FEC and pause/autoneg capabilities are port-type independent. */
597 if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
598 FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
599 FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
604 FW_CAPS_TO_LMM(ANEG, Autoneg);
605 FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
606 FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
608 #undef FW_CAPS_TO_LMM
613 * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
615 * @link_mode_mask: ethtool Link Mode Mask
617 * Translate ethtool Link Mode Mask into a Firmware Port capabilities
620 static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
622 unsigned int fw_caps = 0;
/* OR in the FW capability when the ethtool link-mode bit is set. */
624 #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
626 if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
628 fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
631 LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
632 LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
633 LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
634 LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
635 LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
636 LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
637 LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
639 #undef LMM_TO_FW_CAPS
/* ethtool .get_link_ksettings: report port type, MDIO addressing,
 * supported/advertised/peer link modes, speed/duplex and autoneg state,
 * all derived from the firmware port capabilities.
 */
644 static int get_link_ksettings(struct net_device *dev,
645 struct ethtool_link_ksettings *link_ksettings)
647 struct port_info *pi = netdev_priv(dev);
648 struct ethtool_link_settings *base = &link_ksettings->base;
650 /* For the nonce, the Firmware doesn't send up Port State changes
651 * when the Virtual Interface attached to the Port is down. So
652 * if it's down, let's grab any changes.
654 if (!netif_running(dev))
655 (void)t4_update_port_info(pi);
657 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
658 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
659 ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
661 base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
/* Negative mdio_addr means no MDIO device is attached. */
663 if (pi->mdio_addr >= 0) {
664 base->phy_address = pi->mdio_addr;
665 base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
666 ? ETH_MDIO_SUPPORTS_C22
667 : ETH_MDIO_SUPPORTS_C45);
669 base->phy_address = 255;
670 base->mdio_support = 0;
673 fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
674 link_ksettings->link_modes.supported);
675 fw_caps_to_lmm(pi->port_type,
676 t4_link_acaps(pi->adapter,
679 link_ksettings->link_modes.advertising);
680 fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
681 link_ksettings->link_modes.lp_advertising);
/* Speed is only meaningful while the carrier is up. */
683 base->speed = (netif_carrier_ok(dev)
/* Hardware only supports full duplex. */
686 base->duplex = DUPLEX_FULL;
688 base->autoneg = pi->link_cfg.autoneg;
689 if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
690 ethtool_link_ksettings_add_link_mode(link_ksettings,
692 if (pi->link_cfg.autoneg)
693 ethtool_link_ksettings_add_link_mode(link_ksettings,
694 advertising, Autoneg);
/* ethtool .set_link_ksettings: program a fixed speed (autoneg off) or an
 * advertised capability mask (autoneg on), validated against the port's
 * physical capabilities, then push to firmware via t4_link_l1cfg().
 */
699 static int set_link_ksettings(struct net_device *dev,
700 const struct ethtool_link_ksettings *link_ksettings)
702 struct port_info *pi = netdev_priv(dev);
703 struct link_config *lc = &pi->link_cfg;
704 const struct ethtool_link_settings *base = &link_ksettings->base;
705 struct link_config old_lc;
706 unsigned int fw_caps;
709 /* only full-duplex supported */
710 if (base->duplex != DUPLEX_FULL)
/* Forced-speed path: no autoneg capability, or autoneg disabled. */
714 if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
715 base->autoneg == AUTONEG_DISABLE) {
716 fw_caps = speed_to_fw_caps(base->speed);
718 /* Speed must be supported by Physical Port Capabilities. */
719 if (!(lc->pcaps & fw_caps))
722 lc->speed_caps = fw_caps;
/* Autoneg path: requested modes must be a subset of pcaps. */
726 lmm_to_fw_caps(link_ksettings->link_modes.advertising);
727 if (!(lc->pcaps & fw_caps))
730 lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
732 lc->autoneg = base->autoneg;
734 /* If the firmware rejects the Link Configuration request, back out
735 * the changes and report the error.
737 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
744 /* Translate the Firmware FEC value into the ethtool value. */
745 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
747 unsigned int eth_fec = 0;
749 if (fw_fec & FW_PORT_CAP32_FEC_RS)
750 eth_fec |= ETHTOOL_FEC_RS;
751 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
752 eth_fec |= ETHTOOL_FEC_BASER;
754 /* if nothing is set, then FEC is off */
756 eth_fec = ETHTOOL_FEC_OFF;
761 /* Translate Common Code FEC value into ethtool value. */
762 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
764 unsigned int eth_fec = 0;
766 if (cc_fec & FEC_AUTO)
767 eth_fec |= ETHTOOL_FEC_AUTO;
769 eth_fec |= ETHTOOL_FEC_RS;
770 if (cc_fec & FEC_BASER_RS)
771 eth_fec |= ETHTOOL_FEC_BASER;
773 /* if nothing is set, then FEC is off */
775 eth_fec = ETHTOOL_FEC_OFF;
780 /* Translate ethtool FEC value into Common Code value. */
781 static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
783 unsigned int cc_fec = 0;
/* ETHTOOL_FEC_OFF maps to an empty Common Code FEC mask. */
785 if (eth_fec & ETHTOOL_FEC_OFF)
788 if (eth_fec & ETHTOOL_FEC_AUTO)
790 if (eth_fec & ETHTOOL_FEC_RS)
792 if (eth_fec & ETHTOOL_FEC_BASER)
793 cc_fec |= FEC_BASER_RS;
/* ethtool .get_fecparam: report supported and currently-active FEC. */
798 static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
800 const struct port_info *pi = netdev_priv(dev);
801 const struct link_config *lc = &pi->link_cfg;
803 /* Translate the Firmware FEC Support into the ethtool value. We
804 * always support IEEE 802.3 "automatic" selection of Link FEC type if
805 * any FEC is supported.
807 fec->fec = fwcap_to_eth_fec(lc->pcaps);
808 if (fec->fec != ETHTOOL_FEC_OFF)
809 fec->fec |= ETHTOOL_FEC_AUTO;
811 /* Translate the current internal FEC parameters into the
814 fec->active_fec = cc_to_eth_fec(lc->fec);
/* ethtool .set_fecparam: request a FEC mode; reverts link_config if the
 * firmware L1 configure fails.
 */
819 static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
821 struct port_info *pi = netdev_priv(dev);
822 struct link_config *lc = &pi->link_cfg;
823 struct link_config old_lc;
826 /* Save old Link Configuration in case the L1 Configure below
831 /* Try to perform the L1 Configure and return the result of that
832 * effort. If it fails, revert the attempted change.
834 lc->requested_fec = eth_to_cc_fec(fec->fec);
835 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
/* ethtool .get_pauseparam: report pause autoneg and RX/TX pause state
 * from the current link configuration.
 */
842 static void get_pauseparam(struct net_device *dev,
843 struct ethtool_pauseparam *epause)
845 struct port_info *p = netdev_priv(dev);
847 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
848 epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
849 epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
/* ethtool .set_pauseparam: build the requested flow-control mask and,
 * if the interface is running, push it to firmware immediately.
 */
852 static int set_pauseparam(struct net_device *dev,
853 struct ethtool_pauseparam *epause)
855 struct port_info *p = netdev_priv(dev);
856 struct link_config *lc = &p->link_cfg;
858 if (epause->autoneg == AUTONEG_DISABLE)
859 lc->requested_fc = 0;
860 else if (lc->pcaps & FW_PORT_CAP32_ANEG)
861 lc->requested_fc = PAUSE_AUTONEG;
865 if (epause->rx_pause)
866 lc->requested_fc |= PAUSE_RX;
867 if (epause->tx_pause)
868 lc->requested_fc |= PAUSE_TX;
869 if (netif_running(dev))
870 return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
/* ethtool .get_ringparam: report ring size limits and the current sizes
 * of the port's first queue set (FL size is stored with 8 spare slots).
 */
875 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
877 const struct port_info *pi = netdev_priv(dev);
878 const struct sge *s = &pi->adapter->sge;
880 e->rx_max_pending = MAX_RX_BUFFERS;
881 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
882 e->rx_jumbo_max_pending = 0;
883 e->tx_max_pending = MAX_TXQ_ENTRIES;
885 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
886 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
887 e->rx_jumbo_pending = 0;
888 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
/* ethtool .set_ringparam: validate against min/max limits and store the
 * new sizes for all of the port's queue sets. Rejected after full init
 * since queues are already allocated.
 */
891 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
894 const struct port_info *pi = netdev_priv(dev);
895 struct adapter *adapter = pi->adapter;
896 struct sge *s = &adapter->sge;
898 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
899 e->tx_pending > MAX_TXQ_ENTRIES ||
900 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
901 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
902 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
905 if (adapter->flags & CXGB4_FULL_INIT_DONE)
908 for (i = 0; i < pi->nqsets; ++i) {
909 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
/* +8: keep spare FL entries, mirroring the -8 in get_sge_param(). */
910 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
911 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
917 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
918 * @dev: the network device
919 * @us: the hold-off time in us, or 0 to disable timer
920 * @cnt: the hold-off packet count, or 0 to disable counter
922 * Set the RX interrupt hold-off parameters for a network device.
924 static int set_rx_intr_params(struct net_device *dev,
925 unsigned int us, unsigned int cnt)
928 struct port_info *pi = netdev_priv(dev);
929 struct adapter *adap = pi->adapter;
930 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
/* Apply to every response queue of this port's queue sets. */
932 for (i = 0; i < pi->nqsets; i++, q++) {
933 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
/* Enable/disable adaptive RX coalescing on all of the port's rspqs. */
940 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
943 struct port_info *pi = netdev_priv(dev);
944 struct adapter *adap = pi->adapter;
945 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
947 for (i = 0; i < pi->nqsets; i++, q++)
948 q->rspq.adaptive_rx = adaptive_rx;
/* Report the adaptive-RX setting of the port's first response queue
 * (all of the port's queues share the same setting).
 */
953 static int get_adaptive_rx_setting(struct net_device *dev)
955 struct port_info *pi = netdev_priv(dev);
956 struct adapter *adap = pi->adapter;
957 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
959 return q->rspq.adaptive_rx;
962 /* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
963 * Ethernet TX Queues.
965 static int get_dbqtimer_tick(struct net_device *dev)
967 struct port_info *pi = netdev_priv(dev);
968 struct adapter *adap = pi->adapter;
/* Feature only exists when the SGE DBQ timer flag is set. */
970 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
973 return adap->sge.dbqtimer_tick;
976 /* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
977 * associated with a Network Device.
979 static int get_dbqtimer(struct net_device *dev)
981 struct port_info *pi = netdev_priv(dev);
982 struct adapter *adap = pi->adapter;
983 struct sge_eth_txq *txq;
985 txq = &adap->sge.ethtxq[pi->first_qset];
987 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
990 /* all of the TX Queues use the same Timer Index */
991 return adap->sge.dbqtimer_val[txq->dbqtimerix];
994 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
995 * Queues. This is the fundamental "Tick" that sets the scale of values which
996 * can be used. Individual Ethernet TX Queues index into a relatively small
997 * array of Tick Multipliers. Changing the base Tick will thus change all of
998 * the resulting Timer Values associated with those multipliers for all
999 * Ethernet TX Queues.
1001 static int set_dbqtimer_tick(struct net_device *dev, int usecs)
1003 struct port_info *pi = netdev_priv(dev);
1004 struct adapter *adap = pi->adapter;
1005 struct sge *s = &adap->sge;
1009 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1012 /* return early if it's the same Timer Tick we're already using */
1013 if (s->dbqtimer_tick == usecs)
1016 /* attempt to set the new Timer Tick value */
1017 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1018 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
1020 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
1023 s->dbqtimer_tick = usecs;
1025 /* if successful, reread resulting dependent Timer values */
1026 ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
1031 /* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
1032 * associated with a Network Device. There is a relatively small array of
1033 * possible Timer Values so we need to pick the closest value available.
1035 static int set_dbqtimer(struct net_device *dev, int usecs)
1037 int qix, timerix, min_timerix, delta, min_delta;
1038 struct port_info *pi = netdev_priv(dev);
1039 struct adapter *adap = pi->adapter;
1040 struct sge *s = &adap->sge;
1041 struct sge_eth_txq *txq;
1045 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1048 /* Find the SGE Doorbell Timer Value that's closest to the requested
1051 min_delta = INT_MAX;
1053 for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
1054 delta = s->dbqtimer_val[timerix] - usecs;
1057 if (delta < min_delta) {
1059 min_timerix = timerix;
1063 /* Return early if it's the same Timer Index we're already using.
1064 * We use the same Timer Index for all of the TX Queues for an
1065 * interface so it's only necessary to check the first one.
1067 txq = &s->ethtxq[pi->first_qset];
1068 if (txq->dbqtimerix == min_timerix)
/* Program each TX queue; if queues are live (full init done), tell the
 * firmware the new per-EQ timer index as well.
 */
1071 for (qix = 0; qix < pi->nqsets; qix++, txq++) {
1072 if (adap->flags & CXGB4_FULL_INIT_DONE) {
1074 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1075 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
1076 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
1078 ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
1083 txq->dbqtimerix = min_timerix;
1088 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
1089 * Queues and the Timer Value for the Ethernet TX Queues associated with a
1090 * Network Device. Since changing the global Tick changes all of the
1091 * available Timer Values, we need to do this first before selecting the
1092 * resulting closest Timer Value. Moreover, since the Tick is global,
1093 * changing it affects the Timer Values for all Network Devices on the
1094 * adapter. So, before changing the Tick, we grab all of the current Timer
1095 * Values for other Network Devices on this Adapter and then attempt to select
1096 * new Timer Values which are close to the old values ...
1098 static int set_dbqtimer_tickval(struct net_device *dev,
1099 int tick_usecs, int timer_usecs)
1101 struct port_info *pi = netdev_priv(dev);
1102 struct adapter *adap = pi->adapter;
1103 int timer[MAX_NPORTS];
1107 /* Grab the other adapter Network Interface current timers and fill in
1108 * the new one for this Network Interface.
1110 for_each_port(adap, port)
1111 if (port == pi->port_id)
1112 timer[port] = timer_usecs;
1114 timer[port] = get_dbqtimer(adap->port[port]);
1116 /* Change the global Tick first ... */
1117 ret = set_dbqtimer_tick(dev, tick_usecs);
1121 /* ... and then set all of the Network Interface Timer Values ... */
1122 for_each_port(adap, port) {
1123 ret = set_dbqtimer(adap->port[port], timer[port]);
/* ethtool .set_coalesce: adaptive-RX flag, RX hold-off timer/count, and
 * the TX doorbell-queue timer tick/value.
 */
1131 static int set_coalesce(struct net_device *dev,
1132 struct ethtool_coalesce *coalesce)
1136 set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
1138 ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
1139 coalesce->rx_max_coalesced_frames);
/* tx_coalesce_usecs_irq carries the global DBQ tick; tx_coalesce_usecs
 * carries the per-device timer value.
 */
1143 return set_dbqtimer_tickval(dev,
1144 coalesce->tx_coalesce_usecs_irq,
1145 coalesce->tx_coalesce_usecs);
/* ethtool .get_coalesce: mirror of set_coalesce above. */
1148 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1150 const struct port_info *pi = netdev_priv(dev);
1151 const struct adapter *adap = pi->adapter;
1152 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1154 c->rx_coalesce_usecs = qtimer_val(adap, rq);
/* Packet-count hold-off is only reported when the counter is enabled. */
1155 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
1156 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1157 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
1158 c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
1159 c->tx_coalesce_usecs = get_dbqtimer(dev);
1163 /* The next two routines implement eeprom read/write from physical addresses.
/* Read one 32-bit word at EEPROM physical address phys_addr; the address
 * is first translated to this PF's virtual VPD address.
 */
1165 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1167 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1170 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1171 return vaddr < 0 ? vaddr : 0;
/* Write one 32-bit word at EEPROM physical address phys_addr. */
1174 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1176 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1179 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1180 return vaddr < 0 ? vaddr : 0;
1183 #define EEPROM_MAGIC 0x38E2F10C
/* ethtool .get_eeprom: read the requested range word-by-word into a
 * scratch buffer, then copy out only the caller's [offset, offset+len).
 */
1185 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1189 struct adapter *adapter = netdev2adap(dev);
1190 u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
1195 e->magic = EEPROM_MAGIC;
/* Round the start down to a 4-byte boundary; reads are 32-bit. */
1196 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1197 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1200 memcpy(data, buf + e->offset, e->len);
/* ethtool .set_eeprom: validate the magic, restrict non-PF0 functions to
 * their own EEPROM slice, read-modify-write unaligned edges, then write
 * with write-protect temporarily lifted.
 */
1205 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1210 u32 aligned_offset, aligned_len, *p;
1211 struct adapter *adapter = netdev2adap(dev);
1213 if (eeprom->magic != EEPROM_MAGIC)
1216 aligned_offset = eeprom->offset & ~3;
1217 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
/* Non-PF0 functions may only touch their own EEPROMPFSIZE window. */
1219 if (adapter->pf > 0) {
1220 u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1222 if (aligned_offset < start ||
1223 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1227 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1228 /* RMW possibly needed for first or last words.
1230 buf = kvzalloc(aligned_len, GFP_KERNEL);
1233 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1234 if (!err && aligned_len > 4)
1235 err = eeprom_rd_phys(adapter,
1236 aligned_offset + aligned_len - 4,
1237 (u32 *)&buf[aligned_len - 4]);
1240 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* Disable write-protect, write all words, then re-enable it. */
1245 err = t4_seeprom_wp(adapter, false);
1249 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1250 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1251 aligned_offset += 4;
1255 err = t4_seeprom_wp(adapter, true);
/* Flash a boot-configuration image into adapter flash. */
1262 static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
1263 const u8 *data, u32 size)
1265 struct adapter *adap = netdev2adap(netdev);
1268 ret = t4_load_bootcfg(adap, data, size);
1270 dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
/* Flash a boot (option-ROM) image. The image is duplicated because
 * t4_load_boot() needs a mutable buffer; the expansion-ROM offset is
 * read from the PF0 PCIE_PF_EXPROM_OFST register.
 */
1275 static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
1276 const u8 *bdata, u32 size)
1278 struct adapter *adap = netdev2adap(netdev);
1279 unsigned int offset;
1283 data = kmemdup(bdata, size, GFP_KERNEL);
1287 offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
1289 ret = t4_load_boot(adap, data, offset, size);
1291 dev_err(adap->pdev_dev, "Failed to load boot image\n");
1297 #define CXGB4_PHY_SIG 0x130000ea
/* Check that a candidate PHY firmware image carries the expected
 * big-endian signature in its header.
 */
1299 static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
1301 struct cxgb4_fw_data *header;
1303 header = (struct cxgb4_fw_data *)data;
1304 if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
/* Validate and flash a PHY firmware image; t4_load_phy_fw() is called
 * under win0_lock because it uses memory window 0.
 */
1310 static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
1311 const u8 *data, u32 size)
1313 struct adapter *adap = netdev2adap(netdev);
1316 ret = cxgb4_validate_phy_image(data, NULL);
1318 dev_err(adap->pdev_dev, "PHY signature mismatch\n");
1322 spin_lock_bh(&adap->win0_lock);
1323 ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
1324 spin_unlock_bh(&adap->win0_lock);
1326 dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
/* Flash a main firmware image. A mailbox above PCIE_FW_MASTER_M means
 * "no cooperating firmware"; when the adapter is fully initialized the
 * live firmware's mailbox is used instead so it can assist the upgrade.
 */
1331 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
1332 const u8 *data, u32 size)
1334 struct adapter *adap = netdev2adap(netdev);
1335 unsigned int mbox = PCIE_FW_MASTER_M + 1;
1338 /* If the adapter has been fully initialized then we'll go ahead and
1339 * try to get the firmware's cooperation in upgrading to the new
1340 * firmware image otherwise we'll try to do the entire job from the
1341 * host ... and we always "force" the operation in this path.
1343 if (adap->flags & CXGB4_FULL_INIT_DONE)
1346 ret = t4_fw_upgrade(adap, mbox, data, size, 1);
1348 dev_err(adap->pdev_dev,
1349 "Failed to flash firmware\n");
/* Dispatch a flash request to the handler for the given region; logs a
 * success message naming the region (flash_region_strings).
 */
1354 static int cxgb4_ethtool_flash_region(struct net_device *netdev,
1355 const u8 *data, u32 size, u32 region)
1357 struct adapter *adap = netdev2adap(netdev);
1361 case CXGB4_ETHTOOL_FLASH_FW:
1362 ret = cxgb4_ethtool_flash_fw(netdev, data, size);
1364 case CXGB4_ETHTOOL_FLASH_PHY:
1365 ret = cxgb4_ethtool_flash_phy(netdev, data, size);
1367 case CXGB4_ETHTOOL_FLASH_BOOT:
1368 ret = cxgb4_ethtool_flash_boot(netdev, data, size);
1370 case CXGB4_ETHTOOL_FLASH_BOOTCFG:
1371 ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
1379 dev_info(adap->pdev_dev,
1380 "loading %s successful, reload cxgb4 driver\n",
1381 flash_region_strings[region]);
/* "Chel" in ASCII — signature found inside a main firmware image. */
1385 #define CXGB4_FW_SIG 0x4368656c
1386 #define CXGB4_FW_SIG_OFFSET 0x160
/* Check for a firmware image: the signature lives at a fixed offset, and
 * on success *size is set from the fw_hdr length field (units of 512 B).
 */
1388 static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
1390 struct cxgb4_fw_data *header;
1392 header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
1393 if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
/* Firmware header stores its image length in 512-byte units. */
1397 *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
/* Check for a boot-configuration image by its little-endian signature.
 * NOTE(review): how *size is produced is not visible here — confirm.
 */
1402 static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
1404 struct cxgb4_bootcfg_data *header;
1406 header = (struct cxgb4_bootcfg_data *)data;
1407 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
/* Check for a PCI expansion-ROM boot image and accumulate its total size.
 * A ROM may contain several concatenated images; each legacy header gives
 * its own size (512-byte units) and points at a PCIR data structure whose
 * indicator bit flags the last image — loop until that bit is set.
 */
1413 static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
1415 struct cxgb4_pci_exp_rom_header *exp_header;
1416 struct cxgb4_pcir_data *pcir_header;
1417 struct legacy_pci_rom_hdr *header;
1418 const u8 *cur_header = data;
1421 exp_header = (struct cxgb4_pci_exp_rom_header *)data;
/* 0x55AA expansion-ROM signature check. */
1423 if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
1428 header = (struct legacy_pci_rom_hdr *)cur_header;
1429 pcir_offset = le16_to_cpu(header->pcir_offset);
1430 pcir_header = (struct cxgb4_pcir_data *)(cur_header +
/* size512 counts 512-byte blocks; advance to the next image. */
1433 *size += header->size512 * 512;
1434 cur_header += header->size512 * 512;
1435 } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
/* Identify which flash region @data belongs to by trying each image
 * validator in turn (FW, boot ROM, PHY FW, boot config).  On a match the
 * validator fills *size and the matching CXGB4_ETHTOOL_FLASH_* enum is
 * returned.  Order matters: more specific signatures are tried first.
 */
1441 static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
1443 if (!cxgb4_validate_fw_image(data, size))
1444 return CXGB4_ETHTOOL_FLASH_FW;
1445 if (!cxgb4_validate_boot_image(data, size))
1446 return CXGB4_ETHTOOL_FLASH_BOOT;
1447 if (!cxgb4_validate_phy_image(data, size))
1448 return CXGB4_ETHTOOL_FLASH_PHY;
1449 if (!cxgb4_validate_bootcfg_image(data, size))
1450 return CXGB4_ETHTOOL_FLASH_BOOTCFG;
/* ethtool .flash_device handler.  Loads the named firmware file via
 * request_firmware(), refuses to proceed if another PF (e.g. csiostor)
 * is the firmware master, then either walks a multi-image file
 * (ETHTOOL_FLASH_ALL_REGIONS) flashing each detected region, or flashes
 * the single region the user requested.
 */
1455 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1457 struct adapter *adap = netdev2adap(netdev);
1458 const struct firmware *fw;
1459 unsigned int master;
1468 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
1469 master = PCIE_FW_MASTER_G(pcie_fw);
1470 if (pcie_fw & PCIE_FW_MASTER_VLD_F)
1472 /* if csiostor is the master return */
1473 if (master_vld && (master != adap->pf)) {
1474 dev_warn(adap->pdev_dev,
1475 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n")
1479 ef->data[sizeof(ef->data) - 1] = '\0';
1480 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1486 if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
/* Multi-image file: detect and flash each region until consumed. */
1487 while (fw_size > 0) {
1489 region = cxgb4_ethtool_get_flash_region(fw_data, &size);
1490 if (region < 0 || !size) {
1495 ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
1504 ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
1509 release_firmware(fw);
/* ethtool .get_ts_info handler: report software timestamping always, and
 * hardware timestamping plus the PTP RX filters the hardware can match.
 * phc_index comes from the registered PTP clock, or -1 if none.
 */
1513 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
1515 struct port_info *pi = netdev_priv(dev);
1516 struct adapter *adapter = pi->adapter;
1518 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1519 SOF_TIMESTAMPING_RX_SOFTWARE |
1520 SOF_TIMESTAMPING_SOFTWARE;
1522 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
1523 SOF_TIMESTAMPING_TX_HARDWARE |
1524 SOF_TIMESTAMPING_RAW_HARDWARE;
1526 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1527 (1 << HWTSTAMP_TX_ON);
1529 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1530 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1531 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1532 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1533 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1534 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1536 if (adapter->ptp_clock)
1537 ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
/* No PHC registered: tell userspace there is no hardware clock. */
1539 ts_info->phc_index = -1;
/* ethtool .get_rxfh_indir_size handler: the RSS indirection table size
 * is the per-port rss_size.
 */
1544 static u32 get_rss_table_size(struct net_device *dev)
1546 const struct port_info *pi = netdev_priv(dev);
1548 return pi->rss_size;
/* ethtool .get_rxfh handler: report Toeplitz as the hash function and
 * (in lines not visible here) copy the port's RSS indirection table into
 * @p — confirm the copy loop against the full source.
 */
1551 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
1553 const struct port_info *pi = netdev_priv(dev);
1554 unsigned int n = pi->rss_size;
1557 *hfunc = ETH_RSS_HASH_TOP;
/* ethtool .set_rxfh handler.  Rejects unsupported parameter changes (only
 * the Toeplitz hash is supported; key changes are not).  The new
 * indirection table takes effect immediately only once the interface has
 * been up (FULL_INIT_DONE), in which case it is written to hardware via
 * cxgb4_write_rss().
 */
1565 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1569 struct port_info *pi = netdev_priv(dev);
1571 /* We require at least one supported parameter to be changed and no
1572 * change in any of the unsupported parameters
1575 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1580 /* Interface must be brought up atleast once */
1581 if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
1582 for (i = 0; i < pi->rss_size; i++)
1585 return cxgb4_write_rss(pi, pi->rss);
/* Map a filter tid to its filter_entry.  The tid space is layered:
 * high-priority filters first, then normal TCAM filters (offset by
 * nhpftids), and anything beyond that is a hash filter looked up through
 * the tid table.
 */
1591 static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1594 struct tid_info *t = &adap->tids;
1595 struct filter_entry *f;
1597 if (ftid < t->nhpftids)
1598 f = &adap->tids.hpftid_tab[ftid];
1599 else if (ftid < t->nftids)
1600 f = &adap->tids.ftid_tab[ftid - t->nhpftids];
1602 f = lookup_tid(&adap->tids, ftid);
/* Translate a hardware ch_filter_specification back into the ethtool
 * ethtool_rx_flow_spec format for ETHTOOL_GRXCLSRULE.  Chooses the flow
 * type from the protocol (TCP/UDP) and address family, copies ports,
 * addresses and TOS/traffic-class values plus their masks, adds VLAN
 * match via FLOW_EXT, and encodes the action (drop vs. target queue) in
 * ring_cookie.
 * Naming note: dfs "fport/fip" (foreign) maps to ethtool "src", and
 * "lport/lip" (local) maps to "dst".
 */
1607 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
1608 struct ch_filter_specification *dfs)
1610 switch (dfs->val.proto) {
1613 fs->flow_type = TCP_V6_FLOW;
1615 fs->flow_type = TCP_V4_FLOW;
1619 fs->flow_type = UDP_V6_FLOW;
1621 fs->flow_type = UDP_V4_FLOW;
/* IPv6 branch: ports are big-endian on the wire side of ethtool. */
1626 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
1627 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
1628 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
1629 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
1630 memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
1631 sizeof(fs->h_u.tcp_ip6_spec.ip6src));
1632 memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
1633 sizeof(fs->m_u.tcp_ip6_spec.ip6src));
1634 memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
1635 sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
1636 memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
1637 sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
1638 fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
1639 fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
/* IPv4 branch: same mapping using the tcp_ip4_spec union member. */
1641 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
1642 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
1643 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
1644 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
1645 memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
1646 sizeof(fs->h_u.tcp_ip4_spec.ip4src));
1647 memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
1648 sizeof(fs->m_u.tcp_ip4_spec.ip4src));
1649 memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
1650 sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
1651 memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
1652 sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
1653 fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
1654 fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
/* Inner VLAN match is reported through the FLOW_EXT extension. */
1656 fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
1657 fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
1658 fs->flow_type |= FLOW_EXT;
1660 if (dfs->action == FILTER_DROP)
1661 fs->ring_cookie = RX_CLS_FLOW_DISC;
1663 fs->ring_cookie = dfs->iq;
/* Fetch one ethtool n-tuple filter rule by location for this port.
 * Validates that the adapter is initialized, ethtool filters exist, @loc
 * is in range and actually occupied, then resolves the location to a
 * filter tid and fills cmd->fs from the hardware filter spec.
 */
1666 static int cxgb4_ntuple_get_filter(struct net_device *dev,
1667 struct ethtool_rxnfc *cmd,
1670 const struct port_info *pi = netdev_priv(dev);
1671 struct adapter *adap = netdev2adap(dev);
1672 struct filter_entry *f;
1675 if (!(adap->flags & CXGB4_FULL_INIT_DONE))
1678 /* Check for maximum filter range */
1679 if (!adap->ethtool_filters)
1682 if (loc >= adap->ethtool_filters->nentries)
/* Unset bit means no rule was installed at this location. */
1685 if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
1688 ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
1690 /* Fetch filter_entry */
1691 f = cxgb4_get_filter_entry(adap, ftid);
1693 cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
/* ethtool .get_rxnfc handler.  Handles:
 *  - ETHTOOL_GRXFH: report which header fields feed the RSS hash, based
 *    on the port's rss_mode capability bits (4-tuple vs 2-tuple, and the
 *    extra UDPEN bit required for UDP 4-tuple hashing);
 *  - ETHTOOL_GRXRINGS: number of RX queues;
 *  - ETHTOOL_GRXCLSRLCNT / GRXCLSRULE / GRXCLSRLALL: n-tuple filter
 *    count, single-rule fetch, and enumeration of occupied locations.
 */
1698 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1701 const struct port_info *pi = netdev_priv(dev);
1702 struct adapter *adap = netdev2adap(dev);
1703 unsigned int count = 0, index = 0;
1706 switch (info->cmd) {
1707 case ETHTOOL_GRXFH: {
1708 unsigned int v = pi->rss_mode;
1711 switch (info->flow_type) {
1713 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1714 info->data = RXH_IP_SRC | RXH_IP_DST |
1715 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1716 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1717 info->data = RXH_IP_SRC | RXH_IP_DST;
/* UDP needs the dedicated UDPEN bit in addition to 4-tuple. */
1720 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1721 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1722 info->data = RXH_IP_SRC | RXH_IP_DST |
1723 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1724 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1725 info->data = RXH_IP_SRC | RXH_IP_DST;
1728 case AH_ESP_V4_FLOW:
1730 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1731 info->data = RXH_IP_SRC | RXH_IP_DST;
1734 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1735 info->data = RXH_IP_SRC | RXH_IP_DST |
1736 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1737 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1738 info->data = RXH_IP_SRC | RXH_IP_DST;
1741 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1742 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1743 info->data = RXH_IP_SRC | RXH_IP_DST |
1744 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1745 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1746 info->data = RXH_IP_SRC | RXH_IP_DST;
1749 case AH_ESP_V6_FLOW:
1751 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1752 info->data = RXH_IP_SRC | RXH_IP_DST;
1757 case ETHTOOL_GRXRINGS:
1758 info->data = pi->nqsets;
1760 case ETHTOOL_GRXCLSRLCNT:
1762 adap->ethtool_filters->port[pi->port_id].in_use;
1764 case ETHTOOL_GRXCLSRULE:
1765 return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
1766 case ETHTOOL_GRXCLSRLALL:
1767 info->data = adap->ethtool_filters->nentries;
/* Walk locations, recording each index that holds a valid rule. */
1768 while (count < info->rule_cnt) {
1769 ret = cxgb4_ntuple_get_filter(dev, info, index);
1771 rules[count++] = index;
/* Delete an ethtool n-tuple filter (ETHTOOL_SRXCLSRLDEL).  Validates the
 * location, maps it to the hardware filter id, destroys the rule via the
 * TC flower path, then releases the location's bitmap bit and counter.
 */
1780 static int cxgb4_ntuple_del_filter(struct net_device *dev,
1781 struct ethtool_rxnfc *cmd)
1783 struct cxgb4_ethtool_filter_info *filter_info;
1784 struct adapter *adapter = netdev2adap(dev);
1785 struct port_info *pi = netdev_priv(dev);
1786 struct filter_entry *f;
1790 if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1791 return -EAGAIN; /* can still change nfilters */
1793 if (!adapter->ethtool_filters)
1796 if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1797 dev_err(adapter->pdev_dev,
1798 "Location must be < %u",
1799 adapter->ethtool_filters->nentries);
1803 filter_info = &adapter->ethtool_filters->port[pi->port_id];
/* Nothing installed at this location — nothing to delete. */
1805 if (!test_bit(cmd->fs.location, filter_info->bmap))
1808 filter_id = filter_info->loc_array[cmd->fs.location];
1809 f = cxgb4_get_filter_entry(adapter, filter_id);
1811 ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
/* Only mark the slot free after the hardware rule is gone. */
1815 clear_bit(cmd->fs.location, filter_info->bmap);
1816 filter_info->in_use--;
/* Add Ethtool n-tuple filters (ETHTOOL_SRXCLSRLINS).  Converts the
 * ethtool flow spec into a generic flow rule with
 * ethtool_rx_flow_rule_create(), installs it through the shared
 * cxgb4_flow_rule_replace() path, and on success records the returned
 * tid against the requested location.  Rejects out-of-range locations
 * and locations that are already occupied.
 */
1823 static int cxgb4_ntuple_set_filter(struct net_device *netdev,
1824 struct ethtool_rxnfc *cmd)
1826 struct ethtool_rx_flow_spec_input input = {};
1827 struct cxgb4_ethtool_filter_info *filter_info;
1828 struct adapter *adapter = netdev2adap(netdev);
1829 struct port_info *pi = netdev_priv(netdev);
1830 struct ch_filter_specification fs;
1831 struct ethtool_rx_flow_rule *flow;
1835 if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1836 return -EAGAIN; /* can still change nfilters */
1838 if (!adapter->ethtool_filters)
1841 if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1842 dev_err(adapter->pdev_dev,
1843 "Location must be < %u",
1844 adapter->ethtool_filters->nentries);
/* Refuse to overwrite an occupied location. */
1848 if (test_bit(cmd->fs.location,
1849 adapter->ethtool_filters->port[pi->port_id].bmap))
1852 memset(&fs, 0, sizeof(fs));
1854 input.fs = &cmd->fs;
1855 flow = ethtool_rx_flow_rule_create(&input);
1857 ret = PTR_ERR(flow);
1863 ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
1868 filter_info = &adapter->ethtool_filters->port[pi->port_id];
1870 filter_info->loc_array[cmd->fs.location] = tid;
1871 set_bit(cmd->fs.location, filter_info->bmap);
1872 filter_info->in_use++;
/* The temporary flow rule is always freed, success or failure. */
1875 ethtool_rx_flow_rule_destroy(flow);
/* ethtool .set_rxnfc handler: dispatch n-tuple rule insert/delete;
 * everything else is unsupported (-EOPNOTSUPP).
 */
1880 static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1882 int ret = -EOPNOTSUPP;
1885 case ETHTOOL_SRXCLSRLINS:
1886 ret = cxgb4_ntuple_set_filter(dev, cmd);
1888 case ETHTOOL_SRXCLSRLDEL:
1889 ret = cxgb4_ntuple_del_filter(dev, cmd);
/* ethtool .set_dump handler: record the requested cudbg collection flag
 * and precompute the dump length (header + per-entity headers + the
 * flag-dependent payload size).
 */
1898 static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
1900 struct adapter *adapter = netdev2adap(dev);
1903 len = sizeof(struct cudbg_hdr) +
1904 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1905 len += cxgb4_get_dump_length(adapter, eth_dump->flag);
1907 adapter->eth_dump.flag = eth_dump->flag;
1908 adapter->eth_dump.len = len;
/* ethtool .get_dump_flag handler: report the currently configured dump
 * flag, length and version previously stashed by set_dump().
 */
1912 static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
1914 struct adapter *adapter = netdev2adap(dev);
1916 eth_dump->flag = adapter->eth_dump.flag;
1917 eth_dump->len = adapter->eth_dump.len;
1918 eth_dump->version = adapter->eth_dump.version;
/* ethtool .get_dump_data handler: collect a cudbg debug dump into @buf.
 * Fails if no dump flag was configured, recomputes the required length
 * (it may have changed since set_dump()) and refuses a too-small buffer,
 * then runs the collection and reports the actual length written.
 */
1922 static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
1925 struct adapter *adapter = netdev2adap(dev);
1929 if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
1932 len = sizeof(struct cudbg_hdr) +
1933 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1934 len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
1935 if (eth_dump->len < len)
1938 ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
1942 eth_dump->flag = adapter->eth_dump.flag;
1943 eth_dump->len = len;
1944 eth_dump->version = adapter->eth_dump.version;
/* ethtool .get_module_info handler: identify the transceiver EEPROM
 * layout.  For SFP-style ports, reads the SFF-8472 compliance byte and
 * diagnostic-monitoring type over I2C: no 8472 compliance, or an
 * address-change-required diag type (bit 2), means only the base 8079
 * page is readable.  For QSFP-style ports, the revision byte selects
 * SFF-8636 (rev >= 3) vs SFF-8436.
 */
1948 static int cxgb4_get_module_info(struct net_device *dev,
1949 struct ethtool_modinfo *modinfo)
1951 struct port_info *pi = netdev_priv(dev);
1952 u8 sff8472_comp, sff_diag_type, sff_rev;
1953 struct adapter *adapter = pi->adapter;
1956 if (!t4_is_inserted_mod_type(pi->mod_type))
1959 switch (pi->port_type) {
1960 case FW_PORT_TYPE_SFP:
1961 case FW_PORT_TYPE_QSA:
1962 case FW_PORT_TYPE_SFP28:
1963 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1964 I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
1965 SFF_8472_COMP_LEN, &sff8472_comp);
1968 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1969 I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
1970 SFP_DIAG_TYPE_LEN, &sff_diag_type);
/* Bit 2 of the diag type = "address change required" — treat as 8079. */
1974 if (!sff8472_comp || (sff_diag_type & 4)) {
1975 modinfo->type = ETH_MODULE_SFF_8079;
1976 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1978 modinfo->type = ETH_MODULE_SFF_8472;
1979 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1983 case FW_PORT_TYPE_QSFP:
1984 case FW_PORT_TYPE_QSFP_10G:
1985 case FW_PORT_TYPE_CR_QSFP:
1986 case FW_PORT_TYPE_CR2_QSFP:
1987 case FW_PORT_TYPE_CR4_QSFP:
1988 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1989 I2C_DEV_ADDR_A0, SFF_REV_ADDR,
1990 SFF_REV_LEN, &sff_rev);
1991 /* For QSFP type ports, revision value >= 3
1992 * means the SFP is 8636 compliant.
1996 if (sff_rev >= 0x3) {
1997 modinfo->type = ETH_MODULE_SFF_8636;
1998 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2000 modinfo->type = ETH_MODULE_SFF_8436;
2001 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
/* ethtool .get_module_eeprom handler: read transceiver EEPROM contents.
 * Requests entirely within the first 256-byte page are served from I2C
 * address 0xa0.  Requests that straddle the page boundary read the 0xa0
 * remainder first, then the rest (the diagnostics page) from 0xa2.
 */
2012 static int cxgb4_get_module_eeprom(struct net_device *dev,
2013 struct ethtool_eeprom *eprom, u8 *data)
2015 int ret = 0, offset = eprom->offset, len = eprom->len;
2016 struct port_info *pi = netdev_priv(dev);
2017 struct adapter *adapter = pi->adapter;
2019 memset(data, 0, eprom->len);
/* Fast path: whole request fits in the 0xa0 page. */
2020 if (offset + len <= I2C_PAGE_SIZE)
2021 return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2022 I2C_DEV_ADDR_A0, offset, len, data);
2024 /* offset + len spans 0xa0 and 0xa1 pages */
2025 if (offset <= I2C_PAGE_SIZE) {
2026 /* read 0xa0 page */
2027 len = I2C_PAGE_SIZE - offset;
2028 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2029 I2C_DEV_ADDR_A0, offset, len, data);
2032 offset = I2C_PAGE_SIZE;
2033 /* Remaining bytes to be read from second page =
2034 * Total length - bytes read from first page
2036 len = eprom->len - len;
2038 /* Read additional optical diagnostics from page 0xa2 if supported */
2039 return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
2040 offset, len, &data[eprom->len - len]);
/* ethtool .get_priv_flags handler: merge adapter-wide and per-port
 * private flag bits into one word.
 */
2043 static u32 cxgb4_get_priv_flags(struct net_device *netdev)
2045 struct port_info *pi = netdev_priv(netdev);
2046 struct adapter *adapter = pi->adapter;
2048 return (adapter->eth_flags | pi->eth_flags);
2052 * set_flags - set/unset specified flags if passed in new_flags
2053 * @cur_flags: pointer to current flags
2054 * @new_flags: new incoming flags
2055 * @flags: set of flags to set/unset
/* Only the bits selected by @flags are updated; all other bits of
 * *cur_flags are preserved unchanged.
 */
2057 static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
2059 *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
/* ethtool .set_priv_flags handler: split the incoming flag word into
 * adapter-scope bits (PRIV_FLAGS_ADAP) and port-scope bits
 * (PRIV_FLAGS_PORT) and apply each to its own storage.
 */
2062 static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
2064 struct port_info *pi = netdev_priv(netdev);
2065 struct adapter *adapter = pi->adapter;
2067 set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
2068 set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
/* Run the loopback packet self-test.  If the interface is running, TX
 * queues and carrier are paused around the test and restored afterwards;
 * the test result (0 = pass) is written to *lb_status.
 */
2073 static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
2075 int dev_state = netif_running(netdev);
2078 netif_tx_stop_all_queues(netdev);
2079 netif_carrier_off(netdev);
2082 *lb_status = cxgb4_selftest_lb_pkt(netdev);
2085 netif_tx_start_all_queues(netdev);
2086 netif_carrier_on(netdev);
/* ethtool .self_test handler.  Clears the result array, fails outright
 * if the adapter is not fully initialized with healthy firmware, and
 * runs the (offline-only) loopback test; a non-zero result marks the
 * whole test as failed.
 */
2090 static void cxgb4_self_test(struct net_device *netdev,
2091 struct ethtool_test *eth_test, u64 *data)
2093 struct port_info *pi = netdev_priv(netdev);
2094 struct adapter *adap = pi->adapter;
2096 memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
2098 if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
2099 !(adap->flags & CXGB4_FW_OK)) {
2100 eth_test->flags |= ETH_TEST_FL_FAILED;
/* Loopback is disruptive, so only run it for offline tests. */
2104 if (eth_test->flags & ETH_TEST_FL_OFFLINE)
2105 cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
2107 if (data[CXGB4_ETHTOOL_LB_TEST])
2108 eth_test->flags |= ETH_TEST_FL_FAILED;
/* ethtool operations table for cxgb4 net devices; installed on each
 * netdev by cxgb4_set_ethtool_ops() below.
 */
2111 static const struct ethtool_ops cxgb_ethtool_ops = {
2112 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2113 ETHTOOL_COALESCE_RX_MAX_FRAMES |
2114 ETHTOOL_COALESCE_TX_USECS_IRQ |
2115 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2116 .get_link_ksettings = get_link_ksettings,
2117 .set_link_ksettings = set_link_ksettings,
2118 .get_fecparam = get_fecparam,
2119 .set_fecparam = set_fecparam,
2120 .get_drvinfo = get_drvinfo,
2121 .get_msglevel = get_msglevel,
2122 .set_msglevel = set_msglevel,
2123 .get_ringparam = get_sge_param,
2124 .set_ringparam = set_sge_param,
2125 .get_coalesce = get_coalesce,
2126 .set_coalesce = set_coalesce,
2127 .get_eeprom_len = get_eeprom_len,
2128 .get_eeprom = get_eeprom,
2129 .set_eeprom = set_eeprom,
2130 .get_pauseparam = get_pauseparam,
2131 .set_pauseparam = set_pauseparam,
2132 .get_link = ethtool_op_get_link,
2133 .get_strings = get_strings,
2134 .set_phys_id = identify_port,
2135 .nway_reset = restart_autoneg,
2136 .get_sset_count = get_sset_count,
2137 .get_ethtool_stats = get_stats,
2138 .get_regs_len = get_regs_len,
2139 .get_regs = get_regs,
2140 .get_rxnfc = get_rxnfc,
2141 .set_rxnfc = set_rxnfc,
2142 .get_rxfh_indir_size = get_rss_table_size,
2143 .get_rxfh = get_rss_table,
2144 .set_rxfh = set_rss_table,
2145 .self_test = cxgb4_self_test,
2146 .flash_device = set_flash,
2147 .get_ts_info = get_ts_info,
2148 .set_dump = set_dump,
2149 .get_dump_flag = get_dump_flag,
2150 .get_dump_data = get_dump_data,
2151 .get_module_info = cxgb4_get_module_info,
2152 .get_module_eeprom = cxgb4_get_module_eeprom,
2153 .get_priv_flags = cxgb4_get_priv_flags,
2154 .set_priv_flags = cxgb4_set_priv_flags,
/* Free all ethtool n-tuple filter tracking state: each port's location
 * array and bitmap, the per-port info array, and the top-level
 * structure.  Safe to call when filters were never allocated.
 * Counterpart of cxgb4_init_ethtool_filters() below.
 */
2157 void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
2159 struct cxgb4_ethtool_filter_info *eth_filter_info;
2162 if (!adap->ethtool_filters)
2165 eth_filter_info = adap->ethtool_filters->port;
2167 if (eth_filter_info) {
2168 for (i = 0; i < adap->params.nports; i++) {
/* loc_array is kvzalloc'd, bmap is kcalloc'd — free to match. */
2169 kvfree(eth_filter_info[i].loc_array);
2170 kfree(eth_filter_info[i].bmap);
2172 kfree(eth_filter_info);
2175 kfree(adap->ethtool_filters);
/* Allocate ethtool n-tuple filter tracking state: one
 * cxgb4_ethtool_filter_info per port, each with a location array and an
 * occupancy bitmap sized to the total filter capacity (TCAM hi-prio +
 * normal filters, plus hash-filter space when hash filtering is on).
 * On partial failure, everything allocated so far is unwound.
 */
2178 int cxgb4_init_ethtool_filters(struct adapter *adap)
2180 struct cxgb4_ethtool_filter_info *eth_filter_info;
2181 struct cxgb4_ethtool_filter *eth_filter;
2182 struct tid_info *tids = &adap->tids;
2186 eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
2190 eth_filter_info = kcalloc(adap->params.nports,
2191 sizeof(*eth_filter_info),
2193 if (!eth_filter_info) {
2195 goto free_eth_filter;
2198 eth_filter->port = eth_filter_info;
2200 nentries = tids->nhpftids + tids->nftids;
2201 if (is_hashfilter(adap))
2202 nentries += tids->nhash +
2203 (adap->tids.stid_base - adap->tids.tid_base);
2204 eth_filter->nentries = nentries;
2206 for (i = 0; i < adap->params.nports; i++) {
/* kvzalloc: the location array can be large for hash filters. */
2207 eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
2208 if (!eth_filter->port[i].loc_array) {
2210 goto free_eth_finfo;
2213 eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
2214 sizeof(unsigned long),
2216 if (!eth_filter->port[i].bmap) {
2218 goto free_eth_finfo;
2222 adap->ethtool_filters = eth_filter;
/* Unwind path: free ports allocated so far, then the containers. */
2227 kfree(eth_filter->port[i].bmap);
2228 kvfree(eth_filter->port[i].loc_array);
2230 kfree(eth_filter_info);
/* Install the cxgb4 ethtool operations table on @netdev. */
2238 void cxgb4_set_ethtool_ops(struct net_device *netdev)
2240 netdev->ethtool_ops = &cxgb_ethtool_ops;