1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Ethernet driver
4 * Copyright (C) 2020 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/pci.h>
12 #include <linux/ethtool.h>
13 #include <linux/stddef.h>
14 #include <linux/etherdevice.h>
15 #include <linux/log2.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/linkmode.h>
19 #include "otx2_common.h"
22 #define DRV_NAME "octeontx2-nicpf"
23 #define DRV_VF_NAME "octeontx2-nicvf"
26 char name[ETH_GSTRING_LEN];
31 #define OTX2_DEV_STAT(stat) { \
33 .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
36 /* Physical link config */
37 #define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111
/* HW device stats reported via 'ethtool -S'.
 * OTX2_DEV_STAT() maps a display name to the u64 slot index of the same
 * field inside struct otx2_dev_stats.
 */
44 static const struct otx2_stat otx2_dev_stats[] = {
45 OTX2_DEV_STAT(rx_ucast_frames),
46 OTX2_DEV_STAT(rx_bcast_frames),
47 OTX2_DEV_STAT(rx_mcast_frames),
49 OTX2_DEV_STAT(tx_ucast_frames),
50 OTX2_DEV_STAT(tx_bcast_frames),
51 OTX2_DEV_STAT(tx_mcast_frames),
54 /* Driver level stats */
55 #define OTX2_DRV_STAT(stat) { \
57 .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
/* Driver-maintained error counters (atomic_t slots in struct otx2_drv_stats),
 * also exported through 'ethtool -S'.
 */
60 static const struct otx2_stat otx2_drv_stats[] = {
61 OTX2_DRV_STAT(rx_fcs_errs),
62 OTX2_DRV_STAT(rx_oversize_errs),
63 OTX2_DRV_STAT(rx_undersize_errs),
64 OTX2_DRV_STAT(rx_csum_errs),
65 OTX2_DRV_STAT(rx_len_errs),
66 OTX2_DRV_STAT(rx_other_errs),
/* Per-queue (RQ/SQ) stat descriptors; table entries are declared elsewhere.
 * The counts below size the string/value arrays emitted by get_strings()
 * and get_ethtool_stats().
 */
69 static const struct otx2_stat otx2_queue_stats[] = {
74 static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
75 static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
76 static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
78 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
80 static void otx2_get_drvinfo(struct net_device *netdev,
81 struct ethtool_drvinfo *info)
83 struct otx2_nic *pfvf = netdev_priv(netdev);
85 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
86 strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
89 static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
91 int start_qidx = qset * pfvf->hw.rx_queues;
94 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
95 for (stats = 0; stats < otx2_n_queue_stats; stats++) {
96 sprintf(*data, "rxq%d: %s", qidx + start_qidx,
97 otx2_queue_stats[stats].name);
98 *data += ETH_GSTRING_LEN;
101 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
102 for (stats = 0; stats < otx2_n_queue_stats; stats++) {
103 sprintf(*data, "txq%d: %s", qidx + start_qidx,
104 otx2_queue_stats[stats].name);
105 *data += ETH_GSTRING_LEN;
110 static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
112 struct otx2_nic *pfvf = netdev_priv(netdev);
115 if (sset != ETH_SS_STATS)
118 for (stats = 0; stats < otx2_n_dev_stats; stats++) {
119 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
120 data += ETH_GSTRING_LEN;
123 for (stats = 0; stats < otx2_n_drv_stats; stats++) {
124 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
125 data += ETH_GSTRING_LEN;
128 otx2_get_qset_strings(pfvf, &data, 0);
130 for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
131 sprintf(data, "cgx_rxstat%d: ", stats);
132 data += ETH_GSTRING_LEN;
135 for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
136 sprintf(data, "cgx_txstat%d: ", stats);
137 data += ETH_GSTRING_LEN;
140 strcpy(data, "reset_count");
141 data += ETH_GSTRING_LEN;
142 sprintf(data, "Fec Corrected Errors: ");
143 data += ETH_GSTRING_LEN;
144 sprintf(data, "Fec Uncorrected Errors: ");
145 data += ETH_GSTRING_LEN;
148 static void otx2_get_qset_stats(struct otx2_nic *pfvf,
149 struct ethtool_stats *stats, u64 **data)
155 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
156 if (!otx2_update_rq_stats(pfvf, qidx)) {
157 for (stat = 0; stat < otx2_n_queue_stats; stat++)
161 for (stat = 0; stat < otx2_n_queue_stats; stat++)
162 *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
163 [otx2_queue_stats[stat].index];
166 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
167 if (!otx2_update_sq_stats(pfvf, qidx)) {
168 for (stat = 0; stat < otx2_n_queue_stats; stat++)
172 for (stat = 0; stat < otx2_n_queue_stats; stat++)
173 *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
174 [otx2_queue_stats[stat].index];
178 static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
183 mutex_lock(&pfvf->mbox.lock);
184 req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
188 if (!otx2_sync_mbox_msg(&pfvf->mbox))
191 mutex_unlock(&pfvf->mbox.lock);
195 /* Get device and per queue statistics */
196 static void otx2_get_ethtool_stats(struct net_device *netdev,
197 struct ethtool_stats *stats, u64 *data)
199 struct otx2_nic *pfvf = netdev_priv(netdev);
200 u64 fec_corr_blks, fec_uncorr_blks;
201 struct cgx_fw_data *rsp;
204 otx2_get_dev_stats(pfvf);
205 for (stat = 0; stat < otx2_n_dev_stats; stat++)
206 *(data++) = ((u64 *)&pfvf->hw.dev_stats)
207 [otx2_dev_stats[stat].index];
209 for (stat = 0; stat < otx2_n_drv_stats; stat++)
210 *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
211 [otx2_drv_stats[stat].index]);
213 otx2_get_qset_stats(pfvf, stats, &data);
214 otx2_update_lmac_stats(pfvf);
215 for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
216 *(data++) = pfvf->hw.cgx_rx_stats[stat];
217 for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
218 *(data++) = pfvf->hw.cgx_tx_stats[stat];
219 *(data++) = pfvf->reset_count;
221 fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
222 fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
224 rsp = otx2_get_fwdata(pfvf);
225 if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
226 !otx2_get_phy_fec_stats(pfvf)) {
227 /* Fetch fwdata again because it's been recently populated with
228 * latest PHY FEC stats.
230 rsp = otx2_get_fwdata(pfvf);
232 struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
234 if (pfvf->linfo.fec == OTX2_FEC_BASER) {
235 fec_corr_blks = p->brfec_corr_blks;
236 fec_uncorr_blks = p->brfec_uncorr_blks;
238 fec_corr_blks = p->rsfec_corr_cws;
239 fec_uncorr_blks = p->rsfec_uncorr_cws;
244 *(data++) = fec_corr_blks;
245 *(data++) = fec_uncorr_blks;
248 static int otx2_get_sset_count(struct net_device *netdev, int sset)
250 struct otx2_nic *pfvf = netdev_priv(netdev);
253 if (sset != ETH_SS_STATS)
256 qstats_count = otx2_n_queue_stats *
257 (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
258 otx2_update_lmac_fec_stats(pfvf);
260 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
261 CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
265 /* Get no of queues device supports and current queue count */
266 static void otx2_get_channels(struct net_device *dev,
267 struct ethtool_channels *channel)
269 struct otx2_nic *pfvf = netdev_priv(dev);
271 channel->max_rx = pfvf->hw.max_queues;
272 channel->max_tx = pfvf->hw.max_queues;
274 channel->rx_count = pfvf->hw.rx_queues;
275 channel->tx_count = pfvf->hw.tx_queues;
278 /* Set no of Tx, Rx queues to be used */
279 static int otx2_set_channels(struct net_device *dev,
280 struct ethtool_channels *channel)
282 struct otx2_nic *pfvf = netdev_priv(dev);
283 bool if_up = netif_running(dev);
286 if (!channel->rx_count || !channel->tx_count)
290 dev->netdev_ops->ndo_stop(dev);
292 err = otx2_set_real_num_queues(dev, channel->tx_count,
297 pfvf->hw.rx_queues = channel->rx_count;
298 pfvf->hw.tx_queues = channel->tx_count;
299 pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
303 dev->netdev_ops->ndo_open(dev);
305 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
306 pfvf->hw.tx_queues, pfvf->hw.rx_queues);
311 static void otx2_get_pauseparam(struct net_device *netdev,
312 struct ethtool_pauseparam *pause)
314 struct otx2_nic *pfvf = netdev_priv(netdev);
315 struct cgx_pause_frm_cfg *req, *rsp;
317 if (is_otx2_lbkvf(pfvf->pdev))
320 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
324 if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
325 rsp = (struct cgx_pause_frm_cfg *)
326 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
327 pause->rx_pause = rsp->rx_pause;
328 pause->tx_pause = rsp->tx_pause;
332 static int otx2_set_pauseparam(struct net_device *netdev,
333 struct ethtool_pauseparam *pause)
335 struct otx2_nic *pfvf = netdev_priv(netdev);
340 if (is_otx2_lbkvf(pfvf->pdev))
344 pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
346 pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
349 pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
351 pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
353 return otx2_config_pause_frm(pfvf);
356 static void otx2_get_ringparam(struct net_device *netdev,
357 struct ethtool_ringparam *ring)
359 struct otx2_nic *pfvf = netdev_priv(netdev);
360 struct otx2_qset *qs = &pfvf->qset;
362 ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
363 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
364 ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
365 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
368 static int otx2_set_ringparam(struct net_device *netdev,
369 struct ethtool_ringparam *ring)
371 struct otx2_nic *pfvf = netdev_priv(netdev);
372 bool if_up = netif_running(netdev);
373 struct otx2_qset *qs = &pfvf->qset;
374 u32 rx_count, tx_count;
376 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
379 /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
380 rx_count = ring->rx_pending;
381 /* On some silicon variants a skid or reserved CQEs are
382 * needed to avoid CQ overflow.
384 if (rx_count < pfvf->hw.rq_skid)
385 rx_count = pfvf->hw.rq_skid;
386 rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
388 /* Due pipelining impact minimum 2000 unused SQ CQE's
389 * need to be maintained to avoid CQ overflow, hence the
392 tx_count = clamp_t(u32, ring->tx_pending,
393 Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
394 tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
396 if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
400 netdev->netdev_ops->ndo_stop(netdev);
402 /* Assigned to the nearest possible exponent. */
403 qs->sqe_cnt = tx_count;
404 qs->rqe_cnt = rx_count;
407 netdev->netdev_ops->ndo_open(netdev);
412 static int otx2_get_coalesce(struct net_device *netdev,
413 struct ethtool_coalesce *cmd)
415 struct otx2_nic *pfvf = netdev_priv(netdev);
416 struct otx2_hw *hw = &pfvf->hw;
418 cmd->rx_coalesce_usecs = hw->cq_time_wait;
419 cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
420 cmd->tx_coalesce_usecs = hw->cq_time_wait;
421 cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
/* ethtool .set_coalesce: Rx and Tx share a single CQ per interrupt line,
 * so the user's Rx/Tx thresholds are reconciled into one cq_time_wait /
 * cq_ecount_wait pair (whichever side changed wins; if both changed, the
 * minimum is used) and then programmed on every interrupt context.
 */
426 static int otx2_set_coalesce(struct net_device *netdev,
427 struct ethtool_coalesce *ec)
429 struct otx2_nic *pfvf = netdev_priv(netdev);
430 struct otx2_hw *hw = &pfvf->hw;
433 if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
436 /* 'cq_time_wait' is 8bit and is in multiple of 100ns,
437 * so clamp the user given value to the range of 1 to 25usec.
439 ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
440 1, CQ_TIMER_THRESH_MAX);
441 ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
442 1, CQ_TIMER_THRESH_MAX);
444 /* Rx and Tx are mapped to same CQ, check which one
445 * is changed, if both then choose the min.
447 if (hw->cq_time_wait == ec->rx_coalesce_usecs)
448 hw->cq_time_wait = ec->tx_coalesce_usecs;
449 else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
450 hw->cq_time_wait = ec->rx_coalesce_usecs;
452 hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
453 ec->tx_coalesce_usecs);
455 /* Max ecount_wait supported is 16bit,
456 * so clamp the user given value to the range of 1 to 64k.
458 ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
460 ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
463 /* Rx and Tx are mapped to same CQ, check which one
464 * is changed, if both then choose the min.
466 if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
467 hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
468 else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
469 hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
471 hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
472 ec->tx_max_coalesced_frames);
474 if (netif_running(netdev)) {
475 for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
476 otx2_config_irq_coalescing(pfvf, qidx);
/* Report which packet fields feed the RSS hash for nfc->flow_type
 * (ethtool -n <dev> rx-flow-hash). Baseline is always SIP/DIP; L4 ports
 * are added per protocol if the corresponding NIX flow key bit is set.
 */
482 static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
483 struct ethtool_rxnfc *nfc)
485 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
487 if (!(rss->flowkey_cfg &
488 (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
491 /* Minimum is IPv4 and IPv6, SIP/DIP */
492 nfc->data = RXH_IP_SRC | RXH_IP_DST;
493 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
494 nfc->data |= RXH_VLAN;
496 switch (nfc->flow_type) {
499 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
500 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
504 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
505 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
509 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
510 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
514 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
515 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* Translate the user's RXH_* field selection into the NIX flow key config
 * and program it via otx2_set_flowkey_cfg(). Per flow type, only 2-tuple
 * (L4 hashing off) or 4-tuple (both L4 port halves) is accepted; ESP/AH
 * combinations are restricted by the hardware's 40-byte flow key limit.
 */
532 static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
533 struct ethtool_rxnfc *nfc)
535 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
536 u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
537 u32 rss_cfg = rss->flowkey_cfg;
540 netdev_err(pfvf->netdev,
541 "RSS is disabled, cannot change settings\n");
545 /* Minimum is IPv4 and IPv6, SIP/DIP */
546 if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
549 if (nfc->data & RXH_VLAN)
550 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
552 rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
554 switch (nfc->flow_type) {
557 /* Different config for v4 and v6 is not supported.
558 * Both of them have to be either 4-tuple or 2-tuple.
560 switch (nfc->data & rxh_l4) {
562 rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
564 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
565 rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
573 switch (nfc->data & rxh_l4) {
575 rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
577 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
578 rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
586 switch (nfc->data & rxh_l4) {
588 rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
590 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
591 rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
599 switch (nfc->data & rxh_l4) {
601 rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
602 NIX_FLOW_KEY_TYPE_AH);
603 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
604 NIX_FLOW_KEY_TYPE_IPV4_PROTO;
606 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
607 /* If VLAN hashing is also requested for ESP then do not
608 * allow because of hardware 40 bytes flow key limit.
610 if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
611 netdev_err(pfvf->netdev,
612 "RSS hash of ESP or AH with VLAN is not supported\n");
616 rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
617 /* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes)
618 * and ESP SPI+sequence(8 bytes) uses hardware maximum
619 * limit of 40 byte flow key.
621 rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
629 rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
635 rss->flowkey_cfg = rss_cfg;
636 otx2_set_flowkey_cfg(pfvf);
/* ethtool .get_rxnfc for the PF: ring count, ntuple filter queries and
 * RSS hash field queries are dispatched on nfc->cmd.
 */
640 static int otx2_get_rxnfc(struct net_device *dev,
641 struct ethtool_rxnfc *nfc, u32 *rules)
643 struct otx2_nic *pfvf = netdev_priv(dev);
644 int ret = -EOPNOTSUPP;
647 case ETHTOOL_GRXRINGS:
648 nfc->data = pfvf->hw.rx_queues;
651 case ETHTOOL_GRXCLSRLCNT:
652 nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
655 case ETHTOOL_GRXCLSRULE:
656 ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
658 case ETHTOOL_GRXCLSRLALL:
659 ret = otx2_get_all_flows(pfvf, nfc, rules);
662 return otx2_get_rss_hash_opts(pfvf, nfc);
/* ethtool .set_rxnfc for the PF: hash field config plus ntuple flow
 * insert/delete. Flow rules are only accepted while the interface is up
 * and NETIF_F_NTUPLE is enabled.
 */
669 static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
671 bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
672 struct otx2_nic *pfvf = netdev_priv(dev);
673 int ret = -EOPNOTSUPP;
677 ret = otx2_set_rss_hash_opts(pfvf, nfc);
679 case ETHTOOL_SRXCLSRLINS:
680 if (netif_running(dev) && ntuple)
681 ret = otx2_add_flow(pfvf, nfc);
683 case ETHTOOL_SRXCLSRLDEL:
684 if (netif_running(dev) && ntuple)
685 ret = otx2_remove_flow(pfvf, nfc->fs.location);
694 static int otx2vf_get_rxnfc(struct net_device *dev,
695 struct ethtool_rxnfc *nfc, u32 *rules)
697 struct otx2_nic *pfvf = netdev_priv(dev);
698 int ret = -EOPNOTSUPP;
701 case ETHTOOL_GRXRINGS:
702 nfc->data = pfvf->hw.rx_queues;
706 return otx2_get_rss_hash_opts(pfvf, nfc);
713 static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
715 struct otx2_nic *pfvf = netdev_priv(dev);
716 int ret = -EOPNOTSUPP;
720 ret = otx2_set_rss_hash_opts(pfvf, nfc);
729 static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
731 struct otx2_nic *pfvf = netdev_priv(netdev);
732 struct otx2_rss_info *rss;
734 rss = &pfvf->hw.rss_info;
736 return sizeof(rss->key);
739 static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
741 return MAX_RSS_INDIR_TBL_SIZE;
744 static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
746 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
748 otx2_rss_ctx_flow_del(pfvf, ctx_id);
749 kfree(rss->rss_ctx[ctx_id]);
750 rss->rss_ctx[ctx_id] = NULL;
755 static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
758 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
761 for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
762 if (!rss->rss_ctx[ctx])
765 if (ctx == MAX_RSS_GROUPS)
768 rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
769 if (!rss->rss_ctx[ctx])
776 /* RSS context configuration */
/* Handles key update, context deletion, on-demand context allocation
 * (ETH_RXFH_CONTEXT_ALLOC) and indirection table programming. Only the
 * Toeplitz ("top") hash function is supported.
 */
777 static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
778 const u8 *hkey, const u8 hfunc,
779 u32 *rss_context, bool delete)
781 struct otx2_nic *pfvf = netdev_priv(dev);
782 struct otx2_rss_ctx *rss_ctx;
783 struct otx2_rss_info *rss;
786 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
789 rss = &pfvf->hw.rss_info;
792 netdev_err(dev, "RSS is disabled, cannot change settings\n");
797 memcpy(rss->key, hkey, sizeof(rss->key));
798 otx2_set_rss_key(pfvf);
801 return otx2_rss_ctx_delete(pfvf, *rss_context);
803 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
804 ret = otx2_rss_ctx_create(pfvf, rss_context);
809 rss_ctx = rss->rss_ctx[*rss_context];
810 for (idx = 0; idx < rss->rss_size; idx++)
811 rss_ctx->ind_tbl[idx] = indir[idx];
813 otx2_set_rss_table(pfvf, *rss_context);
/* Read back hash function, indirection table and key for the given RSS
 * context. Before RSS is enabled, the default context reports the ethtool
 * default spread across the Rx queues.
 */
818 static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
819 u8 *hkey, u8 *hfunc, u32 rss_context)
821 struct otx2_nic *pfvf = netdev_priv(dev);
822 struct otx2_rss_ctx *rss_ctx;
823 struct otx2_rss_info *rss;
826 rss = &pfvf->hw.rss_info;
829 *hfunc = ETH_RSS_HASH_TOP;
834 if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
835 rx_queues = pfvf->hw.rx_queues;
836 for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
837 indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
840 if (rss_context >= MAX_RSS_GROUPS)
843 rss_ctx = rss->rss_ctx[rss_context];
848 for (idx = 0; idx < rss->rss_size; idx++)
849 indir[idx] = rss_ctx->ind_tbl[idx];
852 memcpy(hkey, rss->key, sizeof(rss->key));
857 /* Get RSS configuration */
858 static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
861 return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
862 DEFAULT_RSS_CONTEXT_GROUP);
865 /* Configure RSS table and hash key */
866 static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
867 const u8 *hkey, const u8 hfunc)
870 u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
872 return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
875 static u32 otx2_get_msglevel(struct net_device *netdev)
877 struct otx2_nic *pfvf = netdev_priv(netdev);
879 return pfvf->msg_enable;
882 static void otx2_set_msglevel(struct net_device *netdev, u32 val)
884 struct otx2_nic *pfvf = netdev_priv(netdev);
886 pfvf->msg_enable = val;
889 static u32 otx2_get_link(struct net_device *netdev)
891 struct otx2_nic *pfvf = netdev_priv(netdev);
893 /* LBK link is internal and always UP */
894 if (is_otx2_lbkvf(pfvf->pdev))
896 return pfvf->linfo.link_up;
899 static int otx2_get_ts_info(struct net_device *netdev,
900 struct ethtool_ts_info *info)
902 struct otx2_nic *pfvf = netdev_priv(netdev);
905 return ethtool_op_get_ts_info(netdev, info);
907 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
908 SOF_TIMESTAMPING_RX_SOFTWARE |
909 SOF_TIMESTAMPING_SOFTWARE |
910 SOF_TIMESTAMPING_TX_HARDWARE |
911 SOF_TIMESTAMPING_RX_HARDWARE |
912 SOF_TIMESTAMPING_RAW_HARDWARE;
914 info->phc_index = otx2_ptp_clock_index(pfvf);
916 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
918 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
919 (1 << HWTSTAMP_FILTER_ALL);
924 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
926 struct cgx_fw_data *rsp = NULL;
930 mutex_lock(&pfvf->mbox.lock);
931 req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
933 mutex_unlock(&pfvf->mbox.lock);
934 return ERR_PTR(-ENOMEM);
937 err = otx2_sync_mbox_msg(&pfvf->mbox);
939 rsp = (struct cgx_fw_data *)
940 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
945 mutex_unlock(&pfvf->mbox.lock);
/* ethtool .get_fecparam: report the active FEC mode from link info and the
 * FEC modes the firmware says the port supports. fec[] maps the OTX2_FEC_*
 * encoding (index) to ETHTOOL_FEC_* flags.
 */
949 static int otx2_get_fecparam(struct net_device *netdev,
950 struct ethtool_fecparam *fecparam)
952 struct otx2_nic *pfvf = netdev_priv(netdev);
953 struct cgx_fw_data *rsp;
958 ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
959 #define FEC_MAX_INDEX 4
960 if (pfvf->linfo.fec < FEC_MAX_INDEX)
961 fecparam->active_fec = fec[pfvf->linfo.fec];
963 rsp = otx2_get_fwdata(pfvf);
967 if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
968 if (!rsp->fwdata.supported_fec)
969 fecparam->fec = ETHTOOL_FEC_NONE;
971 fecparam->fec = fec[rsp->fwdata.supported_fec];
/* ethtool .set_fecparam: map the requested ETHTOOL_FEC_* mode to the
 * OTX2_FEC_* encoding, send it to firmware over mbox and cache the mode
 * firmware actually applied in pfvf->linfo.fec.
 */
976 static int otx2_set_fecparam(struct net_device *netdev,
977 struct ethtool_fecparam *fecparam)
979 struct otx2_nic *pfvf = netdev_priv(netdev);
980 struct mbox *mbox = &pfvf->mbox;
981 struct fec_mode *req, *rsp;
982 int err = 0, fec = 0;
984 switch (fecparam->fec) {
985 /* Firmware does not support AUTO mode consider it as FEC_OFF */
986 case ETHTOOL_FEC_OFF:
987 case ETHTOOL_FEC_AUTO:
993 case ETHTOOL_FEC_BASER:
994 fec = OTX2_FEC_BASER;
997 netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
1002 if (fec == pfvf->linfo.fec)
1005 mutex_lock(&mbox->lock);
1006 req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
1012 err = otx2_sync_mbox_msg(&pfvf->mbox);
1016 rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
1019 pfvf->linfo.fec = rsp->fec;
1023 mutex_unlock(&mbox->lock);
/* Convert a firmware FEC bitmap (index) into ethtool link-mode FEC bits
 * and OR them into either the advertising or the supported mask of
 * link_ksettings, depending on req_mode.
 */
1027 static void otx2_get_fec_info(u64 index, int req_mode,
1028 struct ethtool_link_ksettings *link_ksettings)
1030 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1037 case OTX2_FEC_BASER:
1038 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1042 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1045 case OTX2_FEC_BASER | OTX2_FEC_RS:
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1048 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1053 /* Add fec modes to existing modes */
1054 if (req_mode == OTX2_MODE_ADVERTISED)
1055 linkmode_or(link_ksettings->link_modes.advertising,
1056 link_ksettings->link_modes.advertising,
1059 linkmode_or(link_ksettings->link_modes.supported,
1060 link_ksettings->link_modes.supported,
/* Translate a CGX firmware link-mode bitmap into ethtool link modes.
 * Bit positions in link_mode_bmap index cgx_link_mode[]; the bitmap is
 * first masked against OTX2_ETHTOOL_SUPPORTED_MODES so unmapped firmware
 * bits are ignored. The SGMII bit expands to the full 10/100/1000BASE-T
 * feature set. Result is copied to advertising or supported per req_mode.
 */
1064 static void otx2_get_link_mode_info(u64 link_mode_bmap,
1066 struct ethtool_link_ksettings
1069 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
1070 const int otx2_sgmii_features[6] = {
1071 ETHTOOL_LINK_MODE_10baseT_Half_BIT,
1072 ETHTOOL_LINK_MODE_10baseT_Full_BIT,
1073 ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1074 ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1075 ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1076 ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1078 /* CGX link modes to Ethtool link mode mapping */
1079 const int cgx_link_mode[27] = {
1081 ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1082 ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1083 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1084 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1085 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1087 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1090 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1091 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1092 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1093 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1094 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1097 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1099 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1100 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1101 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1103 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1104 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1105 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1106 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
1110 link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES;
1112 for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
1113 /* SGMII mode is set */
1115 linkmode_set_bit_array(otx2_sgmii_features,
1116 ARRAY_SIZE(otx2_sgmii_features),
1119 linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
1122 if (req_mode == OTX2_MODE_ADVERTISED)
1123 linkmode_copy(link_ksettings->link_modes.advertising,
1126 linkmode_copy(link_ksettings->link_modes.supported,
/* ethtool .get_link_ksettings for the PF: base fields come from cached
 * link info; supported/advertised link modes and FEC modes are derived
 * from the firmware's fwdata response.
 */
1130 static int otx2_get_link_ksettings(struct net_device *netdev,
1131 struct ethtool_link_ksettings *cmd)
1133 struct otx2_nic *pfvf = netdev_priv(netdev);
1134 struct cgx_fw_data *rsp = NULL;
1136 cmd->base.duplex = pfvf->linfo.full_duplex;
1137 cmd->base.speed = pfvf->linfo.speed;
1138 cmd->base.autoneg = pfvf->linfo.an;
1140 rsp = otx2_get_fwdata(pfvf);
1142 return PTR_ERR(rsp);
1144 if (rsp->fwdata.supported_an)
1145 ethtool_link_ksettings_add_link_mode(cmd,
1149 otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
1150 OTX2_MODE_ADVERTISED, cmd);
1151 otx2_get_fec_info(rsp->fwdata.advertised_fec,
1152 OTX2_MODE_ADVERTISED, cmd);
1153 otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
1154 OTX2_MODE_SUPPORTED, cmd);
1155 otx2_get_fec_info(rsp->fwdata.supported_fec,
1156 OTX2_MODE_SUPPORTED, cmd);
1160 static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
1165 /* Firmware does not support requesting multiple advertised modes
1166 * return first set bit
1168 bit_pos = find_first_bit(cmd->link_modes.advertising,
1169 __ETHTOOL_LINK_MODE_MASK_NBITS);
1170 if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
/* ethtool .set_link_ksettings for the PF: validate speed/duplex/autoneg,
 * reject advertised modes the hardware does not support (checked against
 * a fresh get_link_ksettings snapshot), then program speed, duplex, AN and
 * the single advertised mode through the CGX mbox.
 */
1174 static int otx2_set_link_ksettings(struct net_device *netdev,
1175 const struct ethtool_link_ksettings *cmd)
1177 struct otx2_nic *pf = netdev_priv(netdev);
1178 struct ethtool_link_ksettings cur_ks;
1179 struct cgx_set_link_mode_req *req;
1180 struct mbox *mbox = &pf->mbox;
1183 memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
1185 if (!ethtool_validate_speed(cmd->base.speed) ||
1186 !ethtool_validate_duplex(cmd->base.duplex))
1189 if (cmd->base.autoneg != AUTONEG_ENABLE &&
1190 cmd->base.autoneg != AUTONEG_DISABLE)
1193 otx2_get_link_ksettings(netdev, &cur_ks);
1195 /* Check requested modes against supported modes by hardware */
1196 if (!bitmap_subset(cmd->link_modes.advertising,
1197 cur_ks.link_modes.supported,
1198 __ETHTOOL_LINK_MODE_MASK_NBITS))
1201 mutex_lock(&mbox->lock);
1202 req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
1208 req->args.speed = cmd->base.speed;
1209 /* firmware expects 1 for half duplex and 0 for full duplex
1212 req->args.duplex = cmd->base.duplex ^ 0x1;
1213 req->args.an = cmd->base.autoneg;
1214 otx2_get_advertised_mode(cmd, &req->args.mode);
1216 err = otx2_sync_mbox_msg(&pf->mbox);
1218 mutex_unlock(&mbox->lock);
/* ethtool operations for the PF netdev. Only usecs and max-frames coalesce
 * parameters are accepted (supported_coalesce_params).
 */
1222 static const struct ethtool_ops otx2_ethtool_ops = {
1223 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1224 ETHTOOL_COALESCE_MAX_FRAMES,
1225 .get_link = otx2_get_link,
1226 .get_drvinfo = otx2_get_drvinfo,
1227 .get_strings = otx2_get_strings,
1228 .get_ethtool_stats = otx2_get_ethtool_stats,
1229 .get_sset_count = otx2_get_sset_count,
1230 .set_channels = otx2_set_channels,
1231 .get_channels = otx2_get_channels,
1232 .get_ringparam = otx2_get_ringparam,
1233 .set_ringparam = otx2_set_ringparam,
1234 .get_coalesce = otx2_get_coalesce,
1235 .set_coalesce = otx2_set_coalesce,
1236 .get_rxnfc = otx2_get_rxnfc,
1237 .set_rxnfc = otx2_set_rxnfc,
1238 .get_rxfh_key_size = otx2_get_rxfh_key_size,
1239 .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
1240 .get_rxfh = otx2_get_rxfh,
1241 .set_rxfh = otx2_set_rxfh,
1242 .get_rxfh_context = otx2_get_rxfh_context,
1243 .set_rxfh_context = otx2_set_rxfh_context,
1244 .get_msglevel = otx2_get_msglevel,
1245 .set_msglevel = otx2_set_msglevel,
1246 .get_pauseparam = otx2_get_pauseparam,
1247 .set_pauseparam = otx2_set_pauseparam,
1248 .get_ts_info = otx2_get_ts_info,
1249 .get_fecparam = otx2_get_fecparam,
1250 .set_fecparam = otx2_set_fecparam,
1251 .get_link_ksettings = otx2_get_link_ksettings,
1252 .set_link_ksettings = otx2_set_link_ksettings,
1255 void otx2_set_ethtool_ops(struct net_device *netdev)
1257 netdev->ethtool_ops = &otx2_ethtool_ops;
1260 /* VF's ethtool APIs */
1261 static void otx2vf_get_drvinfo(struct net_device *netdev,
1262 struct ethtool_drvinfo *info)
1264 struct otx2_nic *vf = netdev_priv(netdev);
1266 strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
1267 strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
1270 static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
1272 struct otx2_nic *vf = netdev_priv(netdev);
1275 if (sset != ETH_SS_STATS)
1278 for (stats = 0; stats < otx2_n_dev_stats; stats++) {
1279 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
1280 data += ETH_GSTRING_LEN;
1283 for (stats = 0; stats < otx2_n_drv_stats; stats++) {
1284 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
1285 data += ETH_GSTRING_LEN;
1288 otx2_get_qset_strings(vf, &data, 0);
1290 strcpy(data, "reset_count");
1291 data += ETH_GSTRING_LEN;
1294 static void otx2vf_get_ethtool_stats(struct net_device *netdev,
1295 struct ethtool_stats *stats, u64 *data)
1297 struct otx2_nic *vf = netdev_priv(netdev);
1300 otx2_get_dev_stats(vf);
1301 for (stat = 0; stat < otx2_n_dev_stats; stat++)
1302 *(data++) = ((u64 *)&vf->hw.dev_stats)
1303 [otx2_dev_stats[stat].index];
1305 for (stat = 0; stat < otx2_n_drv_stats; stat++)
1306 *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
1307 [otx2_drv_stats[stat].index]);
1309 otx2_get_qset_stats(vf, stats, &data);
1310 *(data++) = vf->reset_count;
1313 static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
1315 struct otx2_nic *vf = netdev_priv(netdev);
1318 if (sset != ETH_SS_STATS)
1321 qstats_count = otx2_n_queue_stats *
1322 (vf->hw.rx_queues + vf->hw.tx_queues);
1324 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
1327 static int otx2vf_get_link_ksettings(struct net_device *netdev,
1328 struct ethtool_link_ksettings *cmd)
1330 struct otx2_nic *pfvf = netdev_priv(netdev);
1332 if (is_otx2_lbkvf(pfvf->pdev)) {
1333 cmd->base.duplex = DUPLEX_FULL;
1334 cmd->base.speed = SPEED_100000;
1336 return otx2_get_link_ksettings(netdev, cmd);
/* ethtool operations for the VF netdev: same handlers as the PF where
 * shared, with VF-specific drvinfo/strings/stats/rxnfc and no FEC or
 * link-mode setters.
 */
1341 static const struct ethtool_ops otx2vf_ethtool_ops = {
1342 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1343 ETHTOOL_COALESCE_MAX_FRAMES,
1344 .get_link = otx2_get_link,
1345 .get_drvinfo = otx2vf_get_drvinfo,
1346 .get_strings = otx2vf_get_strings,
1347 .get_ethtool_stats = otx2vf_get_ethtool_stats,
1348 .get_sset_count = otx2vf_get_sset_count,
1349 .set_channels = otx2_set_channels,
1350 .get_channels = otx2_get_channels,
1351 .get_rxnfc = otx2vf_get_rxnfc,
1352 .set_rxnfc = otx2vf_set_rxnfc,
1353 .get_rxfh_key_size = otx2_get_rxfh_key_size,
1354 .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
1355 .get_rxfh = otx2_get_rxfh,
1356 .set_rxfh = otx2_set_rxfh,
1357 .get_rxfh_context = otx2_get_rxfh_context,
1358 .set_rxfh_context = otx2_set_rxfh_context,
1359 .get_ringparam = otx2_get_ringparam,
1360 .set_ringparam = otx2_set_ringparam,
1361 .get_coalesce = otx2_get_coalesce,
1362 .set_coalesce = otx2_set_coalesce,
1363 .get_msglevel = otx2_get_msglevel,
1364 .set_msglevel = otx2_set_msglevel,
1365 .get_pauseparam = otx2_get_pauseparam,
1366 .set_pauseparam = otx2_set_pauseparam,
1367 .get_link_ksettings = otx2vf_get_link_ksettings,
1370 void otx2vf_set_ethtool_ops(struct net_device *netdev)
1372 netdev->ethtool_ops = &otx2vf_ethtool_ops;
1374 EXPORT_SYMBOL(otx2vf_set_ethtool_ops);